/*	$NetBSD: if_wm.c,v 1.589 2018/10/05 08:23:58 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.589 2018/10/05 08:23:58 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
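
/*
 * Usage note (illustrative, derived from the macro above): DPRINTF()
 * takes the printf arguments in their own set of parentheses so the
 * variadic part survives the non-debug expansion to nothing, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK,
 *	    ("%s: link up\n", device_xname(sc->sc_dev)));
 */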

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
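
/*
 * Illustrative note (not used by the code): the ring macros above rely
 * on the counts being powers of two, so "& (count - 1)" is a cheap
 * modulo.  E.g. with WM_NTXDESC_82544 == 4096 the mask is 0xfff, and
 * WM_NEXTTX() wraps descriptor 4095 back to 0.
 */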

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
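
/*
 * Illustrative arithmetic (assuming 4 KiB pages): IP_MAXPACKET is
 * 65535, round_page() rounds that up to 65536, so WM_MAXTXDMA allows a
 * 128 KiB DMA window per TSO packet.
 */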

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for
 * normal-sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
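/*
 * For example (illustrative): a 9018-byte jumbo frame spans
 * howmany(9018, MCLBYTES) == 5 buffers, and 256 / 5 is roughly 51,
 * hence "room for 50" above.
 */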
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;
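
/*
 * Note (illustrative): the unions above let a single DMA allocation
 * back whichever descriptor layout the MAC generation uses; the
 * per-queue txq_descsize/rxq_descsize fields select the real element
 * size at run time.
 */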

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
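
/*
 * Note (illustrative): swfwphysem[] maps a MAC function number (0-3,
 * cf. sc_funcid below) to the SW/FW semaphore bit that guards that
 * function's PHY.
 */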

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
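
/*
 * Note (assumption; see wm_rxpbs_adjust_82580()): the table above is
 * indexed by the RXPBS size field and yields the 82580 Rx packet
 * buffer size in KB.
 */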

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)
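
/*
 * Example expansion (illustrative): WM_Q_EVCNT_ATTACH(txq, txdw, txq,
 * 0, xname, EVCNT_TYPE_INTR) formats "txq00txdw" into
 * txq->txq_txdw_evcnt_name and attaches the counter txq->txq_ev_txdw
 * under that name.
 */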

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;
	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only;
					 * MSI-X uses sc_intrs[0] to
					 *   sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
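
/*
 * Note (illustrative): the NULL checks above make the core-lock macros
 * safe to use before sc_core_lock has been created or after it has
 * been destroyed (e.g. early in attach or late in detach).
 */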

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
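
/*
 * Note (illustrative): the two macros above implement the classic
 * tail-pointer idiom: rxq_tailp always points at the location that
 * receives the next mbuf (rxq_head when the chain is empty, otherwise
 * the last mbuf's m_next), so appending is O(1) with no empty-chain
 * special case.
 */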

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
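
/*
 * Note (illustrative): CSR_WRITE_FLUSH() forces posted PCI writes out
 * to the device by reading a harmless register (STATUS here); the read
 * cannot complete until all earlier writes have been delivered.
 */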

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
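
/*
 * Example (illustrative): for a descriptor at bus address 0x123456000,
 * the _LO macros yield 0x23456000 and the _HI macros yield 0x1 on a
 * 64-bit bus_addr_t; with a 32-bit bus_addr_t the high half is always
 * 0, matching hardware descriptors that split the address this way.
 */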

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers, not MII
 * registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1335 	  "82575GB quad-1000baseT Ethernet",
   1336 	  WM_T_82575,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1338 	  "82575GB quad-1000baseT Ethernet (PM)",
   1339 	  WM_T_82575,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1341 	  "82576 1000BaseT Ethernet",
   1342 	  WM_T_82576,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1344 	  "82576 1000BaseX Ethernet",
   1345 	  WM_T_82576,		WMP_F_FIBER },
   1346 
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1348 	  "82576 gigabit Ethernet (SERDES)",
   1349 	  WM_T_82576,		WMP_F_SERDES },
   1350 
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1352 	  "82576 quad-1000BaseT Ethernet",
   1353 	  WM_T_82576,		WMP_F_COPPER },
   1354 
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1356 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1357 	  WM_T_82576,		WMP_F_COPPER },
   1358 
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1360 	  "82576 gigabit Ethernet",
   1361 	  WM_T_82576,		WMP_F_COPPER },
   1362 
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1364 	  "82576 gigabit Ethernet (SERDES)",
   1365 	  WM_T_82576,		WMP_F_SERDES },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1367 	  "82576 quad-gigabit Ethernet (SERDES)",
   1368 	  WM_T_82576,		WMP_F_SERDES },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1371 	  "82580 1000BaseT Ethernet",
   1372 	  WM_T_82580,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1374 	  "82580 1000BaseX Ethernet",
   1375 	  WM_T_82580,		WMP_F_FIBER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1378 	  "82580 1000BaseT Ethernet (SERDES)",
   1379 	  WM_T_82580,		WMP_F_SERDES },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1382 	  "82580 gigabit Ethernet (SGMII)",
   1383 	  WM_T_82580,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1385 	  "82580 dual-1000BaseT Ethernet",
   1386 	  WM_T_82580,		WMP_F_COPPER },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1389 	  "82580 quad-1000BaseX Ethernet",
   1390 	  WM_T_82580,		WMP_F_FIBER },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1393 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1394 	  WM_T_82580,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1397 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1398 	  WM_T_82580,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1401 	  "DH89XXCC 1000BASE-KX Ethernet",
   1402 	  WM_T_82580,		WMP_F_SERDES },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1405 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1406 	  WM_T_82580,		WMP_F_SERDES },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1409 	  "I350 Gigabit Network Connection",
   1410 	  WM_T_I350,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1413 	  "I350 Gigabit Fiber Network Connection",
   1414 	  WM_T_I350,		WMP_F_FIBER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1417 	  "I350 Gigabit Backplane Connection",
   1418 	  WM_T_I350,		WMP_F_SERDES },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1421 	  "I350 Quad Port Gigabit Ethernet",
   1422 	  WM_T_I350,		WMP_F_SERDES },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1425 	  "I350 Gigabit Connection",
   1426 	  WM_T_I350,		WMP_F_COPPER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1429 	  "I354 Gigabit Ethernet (KX)",
   1430 	  WM_T_I354,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1433 	  "I354 Gigabit Ethernet (SGMII)",
   1434 	  WM_T_I354,		WMP_F_COPPER },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1437 	  "I354 Gigabit Ethernet (2.5G)",
   1438 	  WM_T_I354,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1441 	  "I210-T1 Ethernet Server Adapter",
   1442 	  WM_T_I210,		WMP_F_COPPER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1445 	  "I210 Ethernet (Copper OEM)",
   1446 	  WM_T_I210,		WMP_F_COPPER },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1449 	  "I210 Ethernet (Copper IT)",
   1450 	  WM_T_I210,		WMP_F_COPPER },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1453 	  "I210 Ethernet (FLASH less)",
   1454 	  WM_T_I210,		WMP_F_COPPER },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1457 	  "I210 Gigabit Ethernet (Fiber)",
   1458 	  WM_T_I210,		WMP_F_FIBER },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1461 	  "I210 Gigabit Ethernet (SERDES)",
   1462 	  WM_T_I210,		WMP_F_SERDES },
   1463 
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1465 	  "I210 Gigabit Ethernet (FLASH less)",
   1466 	  WM_T_I210,		WMP_F_SERDES },
   1467 
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1469 	  "I210 Gigabit Ethernet (SGMII)",
   1470 	  WM_T_I210,		WMP_F_COPPER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1473 	  "I211 Ethernet (COPPER)",
   1474 	  WM_T_I211,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1476 	  "I217 V Ethernet Connection",
   1477 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1479 	  "I217 LM Ethernet Connection",
   1480 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1482 	  "I218 V Ethernet Connection",
   1483 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1485 	  "I218 V Ethernet Connection",
   1486 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1488 	  "I218 V Ethernet Connection",
   1489 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1491 	  "I218 LM Ethernet Connection",
   1492 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1494 	  "I218 LM Ethernet Connection",
   1495 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1497 	  "I218 LM Ethernet Connection",
   1498 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1500 	  "I219 V Ethernet Connection",
   1501 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1503 	  "I219 V Ethernet Connection",
   1504 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1506 	  "I219 V Ethernet Connection",
   1507 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1509 	  "I219 V Ethernet Connection",
   1510 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1512 	  "I219 LM Ethernet Connection",
   1513 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1515 	  "I219 LM Ethernet Connection",
   1516 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1518 	  "I219 LM Ethernet Connection",
   1519 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1521 	  "I219 LM Ethernet Connection",
   1522 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1524 	  "I219 LM Ethernet Connection",
   1525 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1527 	  "I219 V Ethernet Connection",
   1528 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1530 	  "I219 V Ethernet Connection",
   1531 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1533 	  "I219 LM Ethernet Connection",
   1534 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1536 	  "I219 LM Ethernet Connection",
   1537 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1538 	{ 0,			0,
   1539 	  NULL,
   1540 	  0,			0 },
   1541 };
   1542 
   1543 /*
    1544  * Register read/write functions other than CSR_{READ|WRITE}().
   1546  */
   1547 
   1548 #if 0 /* Not currently used */
   1549 static inline uint32_t
   1550 wm_io_read(struct wm_softc *sc, int reg)
   1551 {
   1552 
   1553 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1554 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1555 }
   1556 #endif
   1557 
   1558 static inline void
   1559 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1560 {
   1561 
   1562 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1563 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1564 }
   1565 
   1566 static inline void
   1567 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1568     uint32_t data)
   1569 {
   1570 	uint32_t regval;
   1571 	int i;
   1572 
   1573 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1574 
   1575 	CSR_WRITE(sc, reg, regval);
   1576 
   1577 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1578 		delay(5);
   1579 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1580 			break;
   1581 	}
   1582 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1583 		aprint_error("%s: WARNING:"
   1584 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1585 		    device_xname(sc->sc_dev), reg);
   1586 	}
   1587 }
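
#if 0	/* Example only -- an illustrative sketch, not used by the driver. */
/*
 * wm_82575_write_8bit_ctlr_reg() writes one byte into an 8-bit register
 * bank reached indirectly through a control register.  The sub-register
 * offset and data value below are hypothetical.
 */
static void
wm_82575_write_8bit_ctlr_reg_example(struct wm_softc *sc)
{

	/* Write 0x42 to hypothetical sub-register 0x10 behind WMREG_SCTL. */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x10, 0x42);
}
#endif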
   1588 
   1589 static inline void
   1590 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1591 {
   1592 	wa->wa_low = htole32(v & 0xffffffffU);
   1593 	if (sizeof(bus_addr_t) == 8)
   1594 		wa->wa_high = htole32((uint64_t) v >> 32);
   1595 	else
   1596 		wa->wa_high = 0;
   1597 }
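
#if 0	/* Example only -- an illustrative sketch, not used by the driver. */
static void
wm_set_dma_addr_example(volatile wiseman_addr_t *wa)
{
	/* Hypothetical 64-bit DMA address. */
	bus_addr_t v = (bus_addr_t)0x0000123456789abcULL;

	wm_set_dma_addr(wa, v);
	/*
	 * Now wa->wa_low == htole32(0x56789abc); wa->wa_high is
	 * htole32(0x00001234) when bus_addr_t is 64-bit, else 0.
	 */
}
#endif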
   1598 
   1599 /*
   1600  * Descriptor sync/init functions.
   1601  */
   1602 static inline void
   1603 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1604 {
   1605 	struct wm_softc *sc = txq->txq_sc;
   1606 
   1607 	/* If it will wrap around, sync to the end of the ring. */
   1608 	if ((start + num) > WM_NTXDESC(txq)) {
   1609 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1610 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1611 		    (WM_NTXDESC(txq) - start), ops);
   1612 		num -= (WM_NTXDESC(txq) - start);
   1613 		start = 0;
   1614 	}
   1615 
   1616 	/* Now sync whatever is left. */
   1617 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1618 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1619 }
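
/*
 * Example (sketch): with a 256-descriptor ring, wm_cdtxsync(txq, 250, 10,
 * ops) wraps and is performed as two bus_dmamap_sync() calls: first over
 * descriptors [250, 256), then over [0, 4).
 */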
   1620 
   1621 static inline void
   1622 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1623 {
   1624 	struct wm_softc *sc = rxq->rxq_sc;
   1625 
   1626 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1627 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1628 }
   1629 
   1630 static inline void
   1631 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1632 {
   1633 	struct wm_softc *sc = rxq->rxq_sc;
   1634 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1635 	struct mbuf *m = rxs->rxs_mbuf;
   1636 
   1637 	/*
   1638 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1639 	 * so that the payload after the Ethernet header is aligned
   1640 	 * to a 4-byte boundary.
    1641 	 *
   1642 	 * XXX BRAINDAMAGE ALERT!
   1643 	 * The stupid chip uses the same size for every buffer, which
   1644 	 * is set in the Receive Control register.  We are using the 2K
   1645 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1646 	 * reason, we can't "scoot" packets longer than the standard
   1647 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1648 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1649 	 * the upper layer copy the headers.
   1650 	 */
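	/*
	 * Worked example: with the 2-byte align_tweak and a 14-byte
	 * Ethernet header, the IP header starts at buffer offset 16,
	 * which is 4-byte aligned.
	 */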
   1651 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1652 
   1653 	if (sc->sc_type == WM_T_82574) {
   1654 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1655 		rxd->erx_data.erxd_addr =
   1656 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1657 		rxd->erx_data.erxd_dd = 0;
   1658 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1659 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1660 
   1661 		rxd->nqrx_data.nrxd_paddr =
   1662 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1663 		/* Currently, split header is not supported. */
   1664 		rxd->nqrx_data.nrxd_haddr = 0;
   1665 	} else {
   1666 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1667 
   1668 		wm_set_dma_addr(&rxd->wrx_addr,
   1669 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1670 		rxd->wrx_len = 0;
   1671 		rxd->wrx_cksum = 0;
   1672 		rxd->wrx_status = 0;
   1673 		rxd->wrx_errors = 0;
   1674 		rxd->wrx_special = 0;
   1675 	}
   1676 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1677 
   1678 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1679 }
   1680 
   1681 /*
   1682  * Device driver interface functions and commonly used functions.
   1683  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1684  */
   1685 
   1686 /* Lookup supported device table */
   1687 static const struct wm_product *
   1688 wm_lookup(const struct pci_attach_args *pa)
   1689 {
   1690 	const struct wm_product *wmp;
   1691 
   1692 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1693 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1694 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1695 			return wmp;
   1696 	}
   1697 	return NULL;
   1698 }
   1699 
   1700 /* The match function (ca_match) */
   1701 static int
   1702 wm_match(device_t parent, cfdata_t cf, void *aux)
   1703 {
   1704 	struct pci_attach_args *pa = aux;
   1705 
   1706 	if (wm_lookup(pa) != NULL)
   1707 		return 1;
   1708 
   1709 	return 0;
   1710 }
   1711 
   1712 /* The attach function (ca_attach) */
   1713 static void
   1714 wm_attach(device_t parent, device_t self, void *aux)
   1715 {
   1716 	struct wm_softc *sc = device_private(self);
   1717 	struct pci_attach_args *pa = aux;
   1718 	prop_dictionary_t dict;
   1719 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1720 	pci_chipset_tag_t pc = pa->pa_pc;
   1721 	int counts[PCI_INTR_TYPE_SIZE];
   1722 	pci_intr_type_t max_type;
   1723 	const char *eetype, *xname;
   1724 	bus_space_tag_t memt;
   1725 	bus_space_handle_t memh;
   1726 	bus_size_t memsize;
   1727 	int memh_valid;
   1728 	int i, error;
   1729 	const struct wm_product *wmp;
   1730 	prop_data_t ea;
   1731 	prop_number_t pn;
   1732 	uint8_t enaddr[ETHER_ADDR_LEN];
   1733 	char buf[256];
   1734 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1735 	pcireg_t preg, memtype;
   1736 	uint16_t eeprom_data, apme_mask;
   1737 	bool force_clear_smbi;
   1738 	uint32_t link_mode;
   1739 	uint32_t reg;
   1740 
   1741 	sc->sc_dev = self;
   1742 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1743 	sc->sc_core_stopping = false;
   1744 
   1745 	wmp = wm_lookup(pa);
   1746 #ifdef DIAGNOSTIC
   1747 	if (wmp == NULL) {
   1748 		printf("\n");
   1749 		panic("wm_attach: impossible");
   1750 	}
   1751 #endif
   1752 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1753 
   1754 	sc->sc_pc = pa->pa_pc;
   1755 	sc->sc_pcitag = pa->pa_tag;
   1756 
   1757 	if (pci_dma64_available(pa))
   1758 		sc->sc_dmat = pa->pa_dmat64;
   1759 	else
   1760 		sc->sc_dmat = pa->pa_dmat;
   1761 
   1762 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1763 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1764 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1765 
   1766 	sc->sc_type = wmp->wmp_type;
   1767 
   1768 	/* Set default function pointers */
   1769 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1770 	sc->phy.release = sc->nvm.release = wm_put_null;
   1771 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1772 
   1773 	if (sc->sc_type < WM_T_82543) {
   1774 		if (sc->sc_rev < 2) {
   1775 			aprint_error_dev(sc->sc_dev,
   1776 			    "i82542 must be at least rev. 2\n");
   1777 			return;
   1778 		}
   1779 		if (sc->sc_rev < 3)
   1780 			sc->sc_type = WM_T_82542_2_0;
   1781 	}
   1782 
   1783 	/*
   1784 	 * Disable MSI for Errata:
   1785 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1786 	 *
   1787 	 *  82544: Errata 25
   1788 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1789 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1790 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1791 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1792 	 *
   1793 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1794 	 *
   1795 	 *  82571 & 82572: Errata 63
   1796 	 */
   1797 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1798 	    || (sc->sc_type == WM_T_82572))
   1799 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1800 
   1801 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1802 	    || (sc->sc_type == WM_T_82580)
   1803 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1804 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1805 		sc->sc_flags |= WM_F_NEWQUEUE;
   1806 
   1807 	/* Set device properties (mactype) */
   1808 	dict = device_properties(sc->sc_dev);
   1809 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1810 
   1811 	/*
    1812 	 * Map the device.  All devices support memory-mapped access,
   1813 	 * and it is really required for normal operation.
   1814 	 */
   1815 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1816 	switch (memtype) {
   1817 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1818 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1819 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1820 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1821 		break;
   1822 	default:
   1823 		memh_valid = 0;
   1824 		break;
   1825 	}
   1826 
   1827 	if (memh_valid) {
   1828 		sc->sc_st = memt;
   1829 		sc->sc_sh = memh;
   1830 		sc->sc_ss = memsize;
   1831 	} else {
   1832 		aprint_error_dev(sc->sc_dev,
   1833 		    "unable to map device registers\n");
   1834 		return;
   1835 	}
   1836 
   1837 	/*
   1838 	 * In addition, i82544 and later support I/O mapped indirect
   1839 	 * register access.  It is not desirable (nor supported in
   1840 	 * this driver) to use it for normal operation, though it is
   1841 	 * required to work around bugs in some chip versions.
   1842 	 */
   1843 	if (sc->sc_type >= WM_T_82544) {
   1844 		/* First we have to find the I/O BAR. */
   1845 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1846 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1847 			if (memtype == PCI_MAPREG_TYPE_IO)
   1848 				break;
   1849 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1850 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1851 				i += 4;	/* skip high bits, too */
   1852 		}
   1853 		if (i < PCI_MAPREG_END) {
   1854 			/*
    1855 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1856 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1857 			 * That's not a problem because newer chips don't
    1858 			 * have this bug.
    1859 			 *
    1860 			 * The i8254x apparently doesn't respond when the
    1861 			 * I/O BAR is 0, which looks as if it hasn't been
    1862 			 * configured.
   1863 			 */
   1864 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1865 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1866 				aprint_error_dev(sc->sc_dev,
   1867 				    "WARNING: I/O BAR at zero.\n");
   1868 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1869 					0, &sc->sc_iot, &sc->sc_ioh,
   1870 					NULL, &sc->sc_ios) == 0) {
   1871 				sc->sc_flags |= WM_F_IOH_VALID;
   1872 			} else {
   1873 				aprint_error_dev(sc->sc_dev,
   1874 				    "WARNING: unable to map I/O space\n");
   1875 			}
   1876 		}
   1877 
   1878 	}
   1879 
   1880 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1881 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1882 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1883 	if (sc->sc_type < WM_T_82542_2_1)
   1884 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1885 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1886 
   1887 	/* power up chip */
   1888 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1889 	    && error != EOPNOTSUPP) {
   1890 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1891 		return;
   1892 	}
   1893 
   1894 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1895 	/*
    1896 	 *  To save interrupt resources, don't use MSI-X if we can use
    1897 	 * only one queue.
   1898 	 */
   1899 	if (sc->sc_nqueues > 1) {
   1900 		max_type = PCI_INTR_TYPE_MSIX;
   1901 		/*
    1902 		 *  The 82583 has an MSI-X capability in the PCI configuration
    1903 		 * space but doesn't actually support it. At least the
    1904 		 * documentation doesn't say anything about MSI-X.
   1905 		 */
   1906 		counts[PCI_INTR_TYPE_MSIX]
   1907 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1908 	} else {
   1909 		max_type = PCI_INTR_TYPE_MSI;
   1910 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1911 	}
   1912 
   1913 	/* Allocation settings */
   1914 	counts[PCI_INTR_TYPE_MSI] = 1;
   1915 	counts[PCI_INTR_TYPE_INTX] = 1;
   1916 	/* overridden by disable flags */
   1917 	if (wm_disable_msi != 0) {
   1918 		counts[PCI_INTR_TYPE_MSI] = 0;
   1919 		if (wm_disable_msix != 0) {
   1920 			max_type = PCI_INTR_TYPE_INTX;
   1921 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1922 		}
   1923 	} else if (wm_disable_msix != 0) {
   1924 		max_type = PCI_INTR_TYPE_MSI;
   1925 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1926 	}
   1927 
   1928 alloc_retry:
   1929 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1930 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1931 		return;
   1932 	}
   1933 
   1934 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1935 		error = wm_setup_msix(sc);
   1936 		if (error) {
   1937 			pci_intr_release(pc, sc->sc_intrs,
   1938 			    counts[PCI_INTR_TYPE_MSIX]);
   1939 
   1940 			/* Setup for MSI: Disable MSI-X */
   1941 			max_type = PCI_INTR_TYPE_MSI;
   1942 			counts[PCI_INTR_TYPE_MSI] = 1;
   1943 			counts[PCI_INTR_TYPE_INTX] = 1;
   1944 			goto alloc_retry;
   1945 		}
   1946 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1947 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1948 		error = wm_setup_legacy(sc);
   1949 		if (error) {
   1950 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1951 			    counts[PCI_INTR_TYPE_MSI]);
   1952 
   1953 			/* The next try is for INTx: Disable MSI */
   1954 			max_type = PCI_INTR_TYPE_INTX;
   1955 			counts[PCI_INTR_TYPE_INTX] = 1;
   1956 			goto alloc_retry;
   1957 		}
   1958 	} else {
   1959 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1960 		error = wm_setup_legacy(sc);
   1961 		if (error) {
   1962 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1963 			    counts[PCI_INTR_TYPE_INTX]);
   1964 			return;
   1965 		}
   1966 	}
   1967 
   1968 	/*
   1969 	 * Check the function ID (unit number of the chip).
   1970 	 */
   1971 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1972 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1973 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1974 	    || (sc->sc_type == WM_T_82580)
   1975 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1976 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1977 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1978 	else
   1979 		sc->sc_funcid = 0;
   1980 
   1981 	/*
   1982 	 * Determine a few things about the bus we're connected to.
   1983 	 */
   1984 	if (sc->sc_type < WM_T_82543) {
   1985 		/* We don't really know the bus characteristics here. */
   1986 		sc->sc_bus_speed = 33;
   1987 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1988 		/*
    1989 		 * CSA (Communication Streaming Architecture) is about as fast
    1990 		 * as a 32-bit 66MHz PCI bus.
   1991 		 */
   1992 		sc->sc_flags |= WM_F_CSA;
   1993 		sc->sc_bus_speed = 66;
   1994 		aprint_verbose_dev(sc->sc_dev,
   1995 		    "Communication Streaming Architecture\n");
   1996 		if (sc->sc_type == WM_T_82547) {
   1997 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1998 			callout_setfunc(&sc->sc_txfifo_ch,
   1999 			    wm_82547_txfifo_stall, sc);
   2000 			aprint_verbose_dev(sc->sc_dev,
   2001 			    "using 82547 Tx FIFO stall work-around\n");
   2002 		}
   2003 	} else if (sc->sc_type >= WM_T_82571) {
   2004 		sc->sc_flags |= WM_F_PCIE;
   2005 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2006 		    && (sc->sc_type != WM_T_ICH10)
   2007 		    && (sc->sc_type != WM_T_PCH)
   2008 		    && (sc->sc_type != WM_T_PCH2)
   2009 		    && (sc->sc_type != WM_T_PCH_LPT)
   2010 		    && (sc->sc_type != WM_T_PCH_SPT)
   2011 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2012 			/* ICH* and PCH* have no PCIe capability registers */
   2013 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2014 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2015 				NULL) == 0)
   2016 				aprint_error_dev(sc->sc_dev,
   2017 				    "unable to find PCIe capability\n");
   2018 		}
   2019 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2020 	} else {
   2021 		reg = CSR_READ(sc, WMREG_STATUS);
   2022 		if (reg & STATUS_BUS64)
   2023 			sc->sc_flags |= WM_F_BUS64;
   2024 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2025 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2026 
   2027 			sc->sc_flags |= WM_F_PCIX;
   2028 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2029 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2030 				aprint_error_dev(sc->sc_dev,
   2031 				    "unable to find PCIX capability\n");
   2032 			else if (sc->sc_type != WM_T_82545_3 &&
   2033 				 sc->sc_type != WM_T_82546_3) {
   2034 				/*
   2035 				 * Work around a problem caused by the BIOS
   2036 				 * setting the max memory read byte count
   2037 				 * incorrectly.
   2038 				 */
   2039 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2040 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2041 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2042 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2043 
   2044 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2045 				    PCIX_CMD_BYTECNT_SHIFT;
   2046 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2047 				    PCIX_STATUS_MAXB_SHIFT;
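				/*
				 * Both fields encode a power-of-two byte
				 * count: a field value of n means 512 << n
				 * bytes, e.g. 3 encodes 4096 bytes.
				 */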
   2048 				if (bytecnt > maxb) {
   2049 					aprint_verbose_dev(sc->sc_dev,
   2050 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2051 					    512 << bytecnt, 512 << maxb);
   2052 					pcix_cmd = (pcix_cmd &
   2053 					    ~PCIX_CMD_BYTECNT_MASK) |
   2054 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2055 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2056 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2057 					    pcix_cmd);
   2058 				}
   2059 			}
   2060 		}
   2061 		/*
   2062 		 * The quad port adapter is special; it has a PCIX-PCIX
   2063 		 * bridge on the board, and can run the secondary bus at
   2064 		 * a higher speed.
   2065 		 */
   2066 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2067 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2068 								      : 66;
   2069 		} else if (sc->sc_flags & WM_F_PCIX) {
   2070 			switch (reg & STATUS_PCIXSPD_MASK) {
   2071 			case STATUS_PCIXSPD_50_66:
   2072 				sc->sc_bus_speed = 66;
   2073 				break;
   2074 			case STATUS_PCIXSPD_66_100:
   2075 				sc->sc_bus_speed = 100;
   2076 				break;
   2077 			case STATUS_PCIXSPD_100_133:
   2078 				sc->sc_bus_speed = 133;
   2079 				break;
   2080 			default:
   2081 				aprint_error_dev(sc->sc_dev,
   2082 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2083 				    reg & STATUS_PCIXSPD_MASK);
   2084 				sc->sc_bus_speed = 66;
   2085 				break;
   2086 			}
   2087 		} else
   2088 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2089 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2090 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2091 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2092 	}
   2093 
   2094 	/* Disable ASPM L0s and/or L1 for workaround */
   2095 	wm_disable_aspm(sc);
   2096 
   2097 	/* clear interesting stat counters */
   2098 	CSR_READ(sc, WMREG_COLC);
   2099 	CSR_READ(sc, WMREG_RXERRC);
   2100 
   2101 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2102 	    || (sc->sc_type >= WM_T_ICH8))
   2103 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2104 	if (sc->sc_type >= WM_T_ICH8)
   2105 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2106 
   2107 	/* Set PHY, NVM mutex related stuff */
   2108 	switch (sc->sc_type) {
   2109 	case WM_T_82542_2_0:
   2110 	case WM_T_82542_2_1:
   2111 	case WM_T_82543:
   2112 	case WM_T_82544:
   2113 		/* Microwire */
   2114 		sc->nvm.read = wm_nvm_read_uwire;
   2115 		sc->sc_nvm_wordsize = 64;
   2116 		sc->sc_nvm_addrbits = 6;
   2117 		break;
   2118 	case WM_T_82540:
   2119 	case WM_T_82545:
   2120 	case WM_T_82545_3:
   2121 	case WM_T_82546:
   2122 	case WM_T_82546_3:
   2123 		/* Microwire */
   2124 		sc->nvm.read = wm_nvm_read_uwire;
   2125 		reg = CSR_READ(sc, WMREG_EECD);
   2126 		if (reg & EECD_EE_SIZE) {
   2127 			sc->sc_nvm_wordsize = 256;
   2128 			sc->sc_nvm_addrbits = 8;
   2129 		} else {
   2130 			sc->sc_nvm_wordsize = 64;
   2131 			sc->sc_nvm_addrbits = 6;
   2132 		}
   2133 		sc->sc_flags |= WM_F_LOCK_EECD;
   2134 		sc->nvm.acquire = wm_get_eecd;
   2135 		sc->nvm.release = wm_put_eecd;
   2136 		break;
   2137 	case WM_T_82541:
   2138 	case WM_T_82541_2:
   2139 	case WM_T_82547:
   2140 	case WM_T_82547_2:
   2141 		reg = CSR_READ(sc, WMREG_EECD);
   2142 		/*
    2143 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2144 		 * 8254[17], so set the flags and functions before calling it.
   2145 		 */
   2146 		sc->sc_flags |= WM_F_LOCK_EECD;
   2147 		sc->nvm.acquire = wm_get_eecd;
   2148 		sc->nvm.release = wm_put_eecd;
   2149 		if (reg & EECD_EE_TYPE) {
   2150 			/* SPI */
   2151 			sc->nvm.read = wm_nvm_read_spi;
   2152 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2153 			wm_nvm_set_addrbits_size_eecd(sc);
   2154 		} else {
   2155 			/* Microwire */
   2156 			sc->nvm.read = wm_nvm_read_uwire;
   2157 			if ((reg & EECD_EE_ABITS) != 0) {
   2158 				sc->sc_nvm_wordsize = 256;
   2159 				sc->sc_nvm_addrbits = 8;
   2160 			} else {
   2161 				sc->sc_nvm_wordsize = 64;
   2162 				sc->sc_nvm_addrbits = 6;
   2163 			}
   2164 		}
   2165 		break;
   2166 	case WM_T_82571:
   2167 	case WM_T_82572:
   2168 		/* SPI */
   2169 		sc->nvm.read = wm_nvm_read_eerd;
    2170 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2171 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2172 		wm_nvm_set_addrbits_size_eecd(sc);
   2173 		sc->phy.acquire = wm_get_swsm_semaphore;
   2174 		sc->phy.release = wm_put_swsm_semaphore;
   2175 		sc->nvm.acquire = wm_get_nvm_82571;
   2176 		sc->nvm.release = wm_put_nvm_82571;
   2177 		break;
   2178 	case WM_T_82573:
   2179 	case WM_T_82574:
   2180 	case WM_T_82583:
   2181 		sc->nvm.read = wm_nvm_read_eerd;
    2182 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2183 		if (sc->sc_type == WM_T_82573) {
   2184 			sc->phy.acquire = wm_get_swsm_semaphore;
   2185 			sc->phy.release = wm_put_swsm_semaphore;
   2186 			sc->nvm.acquire = wm_get_nvm_82571;
   2187 			sc->nvm.release = wm_put_nvm_82571;
   2188 		} else {
   2189 			/* Both PHY and NVM use the same semaphore. */
   2190 			sc->phy.acquire = sc->nvm.acquire
   2191 			    = wm_get_swfwhw_semaphore;
   2192 			sc->phy.release = sc->nvm.release
   2193 			    = wm_put_swfwhw_semaphore;
   2194 		}
   2195 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2196 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2197 			sc->sc_nvm_wordsize = 2048;
   2198 		} else {
   2199 			/* SPI */
   2200 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2201 			wm_nvm_set_addrbits_size_eecd(sc);
   2202 		}
   2203 		break;
   2204 	case WM_T_82575:
   2205 	case WM_T_82576:
   2206 	case WM_T_82580:
   2207 	case WM_T_I350:
   2208 	case WM_T_I354:
   2209 	case WM_T_80003:
   2210 		/* SPI */
   2211 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2212 		wm_nvm_set_addrbits_size_eecd(sc);
   2213 		if ((sc->sc_type == WM_T_80003)
   2214 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2215 			sc->nvm.read = wm_nvm_read_eerd;
   2216 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2217 		} else {
   2218 			sc->nvm.read = wm_nvm_read_spi;
   2219 			sc->sc_flags |= WM_F_LOCK_EECD;
   2220 		}
   2221 		sc->phy.acquire = wm_get_phy_82575;
   2222 		sc->phy.release = wm_put_phy_82575;
   2223 		sc->nvm.acquire = wm_get_nvm_80003;
   2224 		sc->nvm.release = wm_put_nvm_80003;
   2225 		break;
   2226 	case WM_T_ICH8:
   2227 	case WM_T_ICH9:
   2228 	case WM_T_ICH10:
   2229 	case WM_T_PCH:
   2230 	case WM_T_PCH2:
   2231 	case WM_T_PCH_LPT:
   2232 		sc->nvm.read = wm_nvm_read_ich8;
   2233 		/* FLASH */
   2234 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2235 		sc->sc_nvm_wordsize = 2048;
   2236 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2237 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2238 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2239 			aprint_error_dev(sc->sc_dev,
   2240 			    "can't map FLASH registers\n");
   2241 			goto out;
   2242 		}
   2243 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2244 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2245 		    ICH_FLASH_SECTOR_SIZE;
   2246 		sc->sc_ich8_flash_bank_size =
   2247 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2248 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2249 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2250 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
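		/*
		 * Worked example (hypothetical GFPREG value): if reg reads
		 * 0x001f0000, the base is sector 0 and the region spans
		 * sectors 0 through 0x1f, i.e. 0x20 sectors of
		 * ICH_FLASH_SECTOR_SIZE bytes; the arithmetic above halves
		 * that into two banks and converts bytes to 16-bit words.
		 */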
   2251 		sc->sc_flashreg_offset = 0;
   2252 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2253 		sc->phy.release = wm_put_swflag_ich8lan;
   2254 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2255 		sc->nvm.release = wm_put_nvm_ich8lan;
   2256 		break;
   2257 	case WM_T_PCH_SPT:
   2258 	case WM_T_PCH_CNP:
   2259 		sc->nvm.read = wm_nvm_read_spt;
   2260 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2261 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2262 		sc->sc_flasht = sc->sc_st;
   2263 		sc->sc_flashh = sc->sc_sh;
   2264 		sc->sc_ich8_flash_base = 0;
   2265 		sc->sc_nvm_wordsize =
   2266 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2267 		    * NVM_SIZE_MULTIPLIER;
    2268 		/* The size is in bytes; we want words */
   2269 		sc->sc_nvm_wordsize /= 2;
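		/*
		 * Worked example (assuming NVM_SIZE_MULTIPLIER is the
		 * per-step flash size in bytes): a STRAP field value of 7
		 * yields (7 + 1) * NVM_SIZE_MULTIPLIER bytes, halved into
		 * 16-bit words above.
		 */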
   2270 		/* assume 2 banks */
   2271 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2272 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2273 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2274 		sc->phy.release = wm_put_swflag_ich8lan;
   2275 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2276 		sc->nvm.release = wm_put_nvm_ich8lan;
   2277 		break;
   2278 	case WM_T_I210:
   2279 	case WM_T_I211:
    2280 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2281 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2282 		if (wm_nvm_flash_presence_i210(sc)) {
   2283 			sc->nvm.read = wm_nvm_read_eerd;
   2284 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2285 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2286 			wm_nvm_set_addrbits_size_eecd(sc);
   2287 		} else {
   2288 			sc->nvm.read = wm_nvm_read_invm;
   2289 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2290 			sc->sc_nvm_wordsize = INVM_SIZE;
   2291 		}
   2292 		sc->phy.acquire = wm_get_phy_82575;
   2293 		sc->phy.release = wm_put_phy_82575;
   2294 		sc->nvm.acquire = wm_get_nvm_80003;
   2295 		sc->nvm.release = wm_put_nvm_80003;
   2296 		break;
   2297 	default:
   2298 		break;
   2299 	}
   2300 
   2301 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2302 	switch (sc->sc_type) {
   2303 	case WM_T_82571:
   2304 	case WM_T_82572:
   2305 		reg = CSR_READ(sc, WMREG_SWSM2);
   2306 		if ((reg & SWSM2_LOCK) == 0) {
   2307 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2308 			force_clear_smbi = true;
   2309 		} else
   2310 			force_clear_smbi = false;
   2311 		break;
   2312 	case WM_T_82573:
   2313 	case WM_T_82574:
   2314 	case WM_T_82583:
   2315 		force_clear_smbi = true;
   2316 		break;
   2317 	default:
   2318 		force_clear_smbi = false;
   2319 		break;
   2320 	}
   2321 	if (force_clear_smbi) {
   2322 		reg = CSR_READ(sc, WMREG_SWSM);
   2323 		if ((reg & SWSM_SMBI) != 0)
   2324 			aprint_error_dev(sc->sc_dev,
   2325 			    "Please update the Bootagent\n");
   2326 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2327 	}
   2328 
   2329 	/*
    2330 	 * Defer printing the EEPROM type until after verifying the checksum.
   2331 	 * This allows the EEPROM type to be printed correctly in the case
   2332 	 * that no EEPROM is attached.
   2333 	 */
   2334 	/*
   2335 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2336 	 * this for later, so we can fail future reads from the EEPROM.
   2337 	 */
   2338 	if (wm_nvm_validate_checksum(sc)) {
   2339 		/*
    2340 		 * Validate a second time because some PCIe parts fail the
    2341 		 * first check due to the link being in a sleep state.
   2342 		 */
   2343 		if (wm_nvm_validate_checksum(sc))
   2344 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2345 	}
   2346 
   2347 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2348 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2349 	else {
   2350 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2351 		    sc->sc_nvm_wordsize);
   2352 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2353 			aprint_verbose("iNVM");
   2354 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2355 			aprint_verbose("FLASH(HW)");
   2356 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2357 			aprint_verbose("FLASH");
   2358 		else {
   2359 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2360 				eetype = "SPI";
   2361 			else
   2362 				eetype = "MicroWire";
   2363 			aprint_verbose("(%d address bits) %s EEPROM",
   2364 			    sc->sc_nvm_addrbits, eetype);
   2365 		}
   2366 	}
   2367 	wm_nvm_version(sc);
   2368 	aprint_verbose("\n");
   2369 
   2370 	/*
    2371 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
    2372 	 * might be incorrect.
   2373 	 */
   2374 	wm_gmii_setup_phytype(sc, 0, 0);
   2375 
   2376 	/* Reset the chip to a known state. */
   2377 	wm_reset(sc);
   2378 
   2379 	/*
   2380 	 * Check for I21[01] PLL workaround.
   2381 	 *
   2382 	 * Three cases:
   2383 	 * a) Chip is I211.
   2384 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2385 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2386 	 */
   2387 	if (sc->sc_type == WM_T_I211)
   2388 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2389 	if (sc->sc_type == WM_T_I210) {
   2390 		if (!wm_nvm_flash_presence_i210(sc))
   2391 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2392 		else if ((sc->sc_nvm_ver_major < 3)
   2393 		    || ((sc->sc_nvm_ver_major == 3)
   2394 			&& (sc->sc_nvm_ver_minor < 25))) {
   2395 			aprint_verbose_dev(sc->sc_dev,
   2396 			    "ROM image version %d.%d is older than 3.25\n",
   2397 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2398 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2399 		}
   2400 	}
   2401 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2402 		wm_pll_workaround_i210(sc);
   2403 
   2404 	wm_get_wakeup(sc);
   2405 
   2406 	/* Non-AMT based hardware can now take control from firmware */
   2407 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2408 		wm_get_hw_control(sc);
   2409 
   2410 	/*
   2411 	 * Read the Ethernet address from the EEPROM, if not first found
   2412 	 * in device properties.
   2413 	 */
   2414 	ea = prop_dictionary_get(dict, "mac-address");
   2415 	if (ea != NULL) {
   2416 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2417 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2418 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2419 	} else {
   2420 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2421 			aprint_error_dev(sc->sc_dev,
   2422 			    "unable to read Ethernet address\n");
   2423 			goto out;
   2424 		}
   2425 	}
   2426 
   2427 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2428 	    ether_sprintf(enaddr));
   2429 
   2430 	/*
   2431 	 * Read the config info from the EEPROM, and set up various
   2432 	 * bits in the control registers based on their contents.
   2433 	 */
   2434 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2435 	if (pn != NULL) {
   2436 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2437 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2438 	} else {
   2439 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2440 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2441 			goto out;
   2442 		}
   2443 	}
   2444 
   2445 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2446 	if (pn != NULL) {
   2447 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2448 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2449 	} else {
   2450 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2451 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2452 			goto out;
   2453 		}
   2454 	}
   2455 
   2456 	/* check for WM_F_WOL */
   2457 	switch (sc->sc_type) {
   2458 	case WM_T_82542_2_0:
   2459 	case WM_T_82542_2_1:
   2460 	case WM_T_82543:
   2461 		/* dummy? */
   2462 		eeprom_data = 0;
   2463 		apme_mask = NVM_CFG3_APME;
   2464 		break;
   2465 	case WM_T_82544:
   2466 		apme_mask = NVM_CFG2_82544_APM_EN;
   2467 		eeprom_data = cfg2;
   2468 		break;
   2469 	case WM_T_82546:
   2470 	case WM_T_82546_3:
   2471 	case WM_T_82571:
   2472 	case WM_T_82572:
   2473 	case WM_T_82573:
   2474 	case WM_T_82574:
   2475 	case WM_T_82583:
   2476 	case WM_T_80003:
   2477 	default:
   2478 		apme_mask = NVM_CFG3_APME;
   2479 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2480 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2481 		break;
   2482 	case WM_T_82575:
   2483 	case WM_T_82576:
   2484 	case WM_T_82580:
   2485 	case WM_T_I350:
   2486 	case WM_T_I354: /* XXX ok? */
   2487 	case WM_T_ICH8:
   2488 	case WM_T_ICH9:
   2489 	case WM_T_ICH10:
   2490 	case WM_T_PCH:
   2491 	case WM_T_PCH2:
   2492 	case WM_T_PCH_LPT:
   2493 	case WM_T_PCH_SPT:
   2494 	case WM_T_PCH_CNP:
   2495 		/* XXX The funcid should be checked on some devices */
   2496 		apme_mask = WUC_APME;
   2497 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2498 		break;
   2499 	}
   2500 
   2501 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2502 	if ((eeprom_data & apme_mask) != 0)
   2503 		sc->sc_flags |= WM_F_WOL;
   2504 
   2505 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2506 		/* Check NVM for autonegotiation */
   2507 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2508 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2509 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2510 		}
   2511 	}
   2512 
   2513 	/*
    2514 	 * XXX need special handling for some multiple-port cards
    2515 	 * to disable a particular port.
   2516 	 */
   2517 
   2518 	if (sc->sc_type >= WM_T_82544) {
   2519 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2520 		if (pn != NULL) {
   2521 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2522 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2523 		} else {
   2524 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2525 				aprint_error_dev(sc->sc_dev,
   2526 				    "unable to read SWDPIN\n");
   2527 				goto out;
   2528 			}
   2529 		}
   2530 	}
   2531 
   2532 	if (cfg1 & NVM_CFG1_ILOS)
   2533 		sc->sc_ctrl |= CTRL_ILOS;
   2534 
   2535 	/*
   2536 	 * XXX
    2537 	 * This code isn't correct because pins 2 and 3 are located in
    2538 	 * different positions on newer chips. Check all the datasheets.
    2539 	 *
    2540 	 * Until this problem is resolved, only do this on chips < 82580.
   2541 	 */
   2542 	if (sc->sc_type <= WM_T_82580) {
   2543 		if (sc->sc_type >= WM_T_82544) {
   2544 			sc->sc_ctrl |=
   2545 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2546 			    CTRL_SWDPIO_SHIFT;
   2547 			sc->sc_ctrl |=
   2548 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2549 			    CTRL_SWDPINS_SHIFT;
   2550 		} else {
   2551 			sc->sc_ctrl |=
   2552 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2553 			    CTRL_SWDPIO_SHIFT;
   2554 		}
   2555 	}
   2556 
   2557 	/* XXX For other than 82580? */
   2558 	if (sc->sc_type == WM_T_82580) {
   2559 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2560 		if (nvmword & __BIT(13))
   2561 			sc->sc_ctrl |= CTRL_ILOS;
   2562 	}
   2563 
   2564 #if 0
   2565 	if (sc->sc_type >= WM_T_82544) {
   2566 		if (cfg1 & NVM_CFG1_IPS0)
   2567 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2568 		if (cfg1 & NVM_CFG1_IPS1)
   2569 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2570 		sc->sc_ctrl_ext |=
   2571 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2572 		    CTRL_EXT_SWDPIO_SHIFT;
   2573 		sc->sc_ctrl_ext |=
   2574 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2575 		    CTRL_EXT_SWDPINS_SHIFT;
   2576 	} else {
   2577 		sc->sc_ctrl_ext |=
   2578 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2579 		    CTRL_EXT_SWDPIO_SHIFT;
   2580 	}
   2581 #endif
   2582 
   2583 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2584 #if 0
   2585 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2586 #endif
   2587 
   2588 	if (sc->sc_type == WM_T_PCH) {
   2589 		uint16_t val;
   2590 
   2591 		/* Save the NVM K1 bit setting */
   2592 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2593 
   2594 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2595 			sc->sc_nvm_k1_enabled = 1;
   2596 		else
   2597 			sc->sc_nvm_k1_enabled = 0;
   2598 	}
   2599 
   2600 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2601 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2602 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2603 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2604 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2605 	    || sc->sc_type == WM_T_82573
   2606 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2607 		/* Copper only */
   2608 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2609 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2610 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2611 	    || (sc->sc_type == WM_T_I211)) {
   2612 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2613 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2614 		switch (link_mode) {
   2615 		case CTRL_EXT_LINK_MODE_1000KX:
   2616 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2617 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2618 			break;
   2619 		case CTRL_EXT_LINK_MODE_SGMII:
   2620 			if (wm_sgmii_uses_mdio(sc)) {
   2621 				aprint_verbose_dev(sc->sc_dev,
   2622 				    "SGMII(MDIO)\n");
   2623 				sc->sc_flags |= WM_F_SGMII;
   2624 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2625 				break;
   2626 			}
   2627 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2628 			/*FALLTHROUGH*/
   2629 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2630 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2631 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2632 				if (link_mode
   2633 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2634 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2635 					sc->sc_flags |= WM_F_SGMII;
   2636 				} else {
   2637 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2638 					aprint_verbose_dev(sc->sc_dev,
   2639 					    "SERDES\n");
   2640 				}
   2641 				break;
   2642 			}
   2643 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2644 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2645 
   2646 			/* Change current link mode setting */
   2647 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2648 			switch (sc->sc_mediatype) {
   2649 			case WM_MEDIATYPE_COPPER:
   2650 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2651 				break;
   2652 			case WM_MEDIATYPE_SERDES:
   2653 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2654 				break;
   2655 			default:
   2656 				break;
   2657 			}
   2658 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2659 			break;
   2660 		case CTRL_EXT_LINK_MODE_GMII:
   2661 		default:
   2662 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2663 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2664 			break;
   2665 		}
   2666 
    2667 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2668 			reg |= CTRL_EXT_I2C_ENA;
    2669 		else
    2670 			reg &= ~CTRL_EXT_I2C_ENA;
   2672 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2673 	} else if (sc->sc_type < WM_T_82543 ||
   2674 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2675 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2676 			aprint_error_dev(sc->sc_dev,
   2677 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2678 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2679 		}
   2680 	} else {
   2681 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2682 			aprint_error_dev(sc->sc_dev,
   2683 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2684 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2685 		}
   2686 	}
   2687 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2688 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2689 
   2690 	/* Set device properties (macflags) */
   2691 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2692 
   2693 	/* Initialize the media structures accordingly. */
   2694 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2695 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2696 	else
   2697 		wm_tbi_mediainit(sc); /* All others */
   2698 
   2699 	ifp = &sc->sc_ethercom.ec_if;
   2700 	xname = device_xname(sc->sc_dev);
   2701 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2702 	ifp->if_softc = sc;
   2703 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2704 #ifdef WM_MPSAFE
   2705 	ifp->if_extflags = IFEF_MPSAFE;
   2706 #endif
   2707 	ifp->if_ioctl = wm_ioctl;
   2708 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2709 		ifp->if_start = wm_nq_start;
   2710 		/*
    2711 		 * When the number of CPUs is one and the controller can use
    2712 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2713 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2714 		 * other for link status changes.
    2715 		 * In this situation, wm_nq_transmit() is disadvantageous
    2716 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2717 		 */
   2718 		if (wm_is_using_multiqueue(sc))
   2719 			ifp->if_transmit = wm_nq_transmit;
   2720 	} else {
   2721 		ifp->if_start = wm_start;
   2722 		/*
    2723 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2724 		 */
   2725 		if (wm_is_using_multiqueue(sc))
   2726 			ifp->if_transmit = wm_transmit;
   2727 	}
    2728 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
   2729 	ifp->if_init = wm_init;
   2730 	ifp->if_stop = wm_stop;
   2731 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2732 	IFQ_SET_READY(&ifp->if_snd);
   2733 
   2734 	/* Check for jumbo frame */
   2735 	switch (sc->sc_type) {
   2736 	case WM_T_82573:
   2737 		/* XXX limited to 9234 if ASPM is disabled */
   2738 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2739 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2740 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2741 		break;
   2742 	case WM_T_82571:
   2743 	case WM_T_82572:
   2744 	case WM_T_82574:
   2745 	case WM_T_82583:
   2746 	case WM_T_82575:
   2747 	case WM_T_82576:
   2748 	case WM_T_82580:
   2749 	case WM_T_I350:
   2750 	case WM_T_I354:
   2751 	case WM_T_I210:
   2752 	case WM_T_I211:
   2753 	case WM_T_80003:
   2754 	case WM_T_ICH9:
   2755 	case WM_T_ICH10:
   2756 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2757 	case WM_T_PCH_LPT:
   2758 	case WM_T_PCH_SPT:
   2759 	case WM_T_PCH_CNP:
   2760 		/* XXX limited to 9234 */
   2761 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2762 		break;
   2763 	case WM_T_PCH:
   2764 		/* XXX limited to 4096 */
   2765 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2766 		break;
   2767 	case WM_T_82542_2_0:
   2768 	case WM_T_82542_2_1:
   2769 	case WM_T_ICH8:
   2770 		/* No support for jumbo frame */
   2771 		break;
   2772 	default:
   2773 		/* ETHER_MAX_LEN_JUMBO */
   2774 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2775 		break;
   2776 	}
   2777 
   2778 	/* If we're a i82543 or greater, we can support VLANs. */
   2779 	if (sc->sc_type >= WM_T_82543)
   2780 		sc->sc_ethercom.ec_capabilities |=
   2781 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2782 
   2783 	/*
    2784 	 * We can perform TCPv4 and UDPv4 checksums in hardware.  Only
    2785 	 * on i82543 and later.
   2786 	 */
   2787 	if (sc->sc_type >= WM_T_82543) {
   2788 		ifp->if_capabilities |=
   2789 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2790 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2791 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2792 		    IFCAP_CSUM_TCPv6_Tx |
   2793 		    IFCAP_CSUM_UDPv6_Tx;
   2794 	}
   2795 
   2796 	/*
   2797 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2798 	 *
   2799 	 *	82541GI (8086:1076) ... no
   2800 	 *	82572EI (8086:10b9) ... yes
   2801 	 */
   2802 	if (sc->sc_type >= WM_T_82571) {
   2803 		ifp->if_capabilities |=
   2804 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2805 	}
   2806 
   2807 	/*
   2808 	 * If we're a i82544 or greater (except i82547), we can do
   2809 	 * TCP segmentation offload.
   2810 	 */
   2811 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2812 		ifp->if_capabilities |= IFCAP_TSOv4;
   2813 	}
   2814 
   2815 	if (sc->sc_type >= WM_T_82571) {
   2816 		ifp->if_capabilities |= IFCAP_TSOv6;
   2817 	}
   2818 
   2819 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2820 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2821 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2822 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2823 
   2824 #ifdef WM_MPSAFE
   2825 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2826 #else
   2827 	sc->sc_core_lock = NULL;
   2828 #endif
   2829 
   2830 	/* Attach the interface. */
   2831 	error = if_initialize(ifp);
   2832 	if (error != 0) {
   2833 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2834 		    error);
   2835 		return; /* Error */
   2836 	}
   2837 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2838 	ether_ifattach(ifp, enaddr);
   2839 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2840 	if_register(ifp);
   2841 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2842 	    RND_FLAG_DEFAULT);
   2843 
   2844 #ifdef WM_EVENT_COUNTERS
   2845 	/* Attach event counters. */
   2846 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2847 	    NULL, xname, "linkintr");
   2848 
   2849 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2850 	    NULL, xname, "tx_xoff");
   2851 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2852 	    NULL, xname, "tx_xon");
   2853 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2854 	    NULL, xname, "rx_xoff");
   2855 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2856 	    NULL, xname, "rx_xon");
   2857 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2858 	    NULL, xname, "rx_macctl");
   2859 #endif /* WM_EVENT_COUNTERS */
   2860 
   2861 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2862 		pmf_class_network_register(self, ifp);
   2863 	else
   2864 		aprint_error_dev(self, "couldn't establish power handler\n");
   2865 
   2866 	sc->sc_flags |= WM_F_ATTACHED;
   2867  out:
   2868 	return;
   2869 }
   2870 
   2871 /* The detach function (ca_detach) */
   2872 static int
   2873 wm_detach(device_t self, int flags __unused)
   2874 {
   2875 	struct wm_softc *sc = device_private(self);
   2876 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2877 	int i;
   2878 
   2879 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2880 		return 0;
   2881 
   2882 	/* Stop the interface. Callouts are stopped in it. */
   2883 	wm_stop(ifp, 1);
   2884 
   2885 	pmf_device_deregister(self);
   2886 
   2887 #ifdef WM_EVENT_COUNTERS
   2888 	evcnt_detach(&sc->sc_ev_linkintr);
   2889 
   2890 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2891 	evcnt_detach(&sc->sc_ev_tx_xon);
   2892 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2893 	evcnt_detach(&sc->sc_ev_rx_xon);
   2894 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2895 #endif /* WM_EVENT_COUNTERS */
   2896 
   2897 	/* Tell the firmware about the release */
   2898 	WM_CORE_LOCK(sc);
   2899 	wm_release_manageability(sc);
   2900 	wm_release_hw_control(sc);
   2901 	wm_enable_wakeup(sc);
   2902 	WM_CORE_UNLOCK(sc);
   2903 
   2904 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2905 
   2906 	/* Delete all remaining media. */
   2907 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2908 
   2909 	ether_ifdetach(ifp);
   2910 	if_detach(ifp);
   2911 	if_percpuq_destroy(sc->sc_ipq);
   2912 
   2913 	/* Unload RX dmamaps and free mbufs */
   2914 	for (i = 0; i < sc->sc_nqueues; i++) {
   2915 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2916 		mutex_enter(rxq->rxq_lock);
   2917 		wm_rxdrain(rxq);
   2918 		mutex_exit(rxq->rxq_lock);
   2919 	}
   2920 	/* Must unlock here */
   2921 
   2922 	/* Disestablish the interrupt handler */
   2923 	for (i = 0; i < sc->sc_nintrs; i++) {
   2924 		if (sc->sc_ihs[i] != NULL) {
   2925 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2926 			sc->sc_ihs[i] = NULL;
   2927 		}
   2928 	}
   2929 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2930 
   2931 	wm_free_txrx_queues(sc);
   2932 
   2933 	/* Unmap the registers */
   2934 	if (sc->sc_ss) {
   2935 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2936 		sc->sc_ss = 0;
   2937 	}
   2938 	if (sc->sc_ios) {
   2939 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2940 		sc->sc_ios = 0;
   2941 	}
   2942 	if (sc->sc_flashs) {
   2943 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2944 		sc->sc_flashs = 0;
   2945 	}
   2946 
   2947 	if (sc->sc_core_lock)
   2948 		mutex_obj_free(sc->sc_core_lock);
   2949 	if (sc->sc_ich_phymtx)
   2950 		mutex_obj_free(sc->sc_ich_phymtx);
   2951 	if (sc->sc_ich_nvmmtx)
   2952 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2953 
   2954 	return 0;
   2955 }
   2956 
   2957 static bool
   2958 wm_suspend(device_t self, const pmf_qual_t *qual)
   2959 {
   2960 	struct wm_softc *sc = device_private(self);
   2961 
   2962 	wm_release_manageability(sc);
   2963 	wm_release_hw_control(sc);
   2964 	wm_enable_wakeup(sc);
   2965 
   2966 	return true;
   2967 }
   2968 
   2969 static bool
   2970 wm_resume(device_t self, const pmf_qual_t *qual)
   2971 {
   2972 	struct wm_softc *sc = device_private(self);
   2973 
   2974 	/* Disable ASPM L0s and/or L1 for workaround */
   2975 	wm_disable_aspm(sc);
   2976 	wm_init_manageability(sc);
   2977 
   2978 	return true;
   2979 }
   2980 
   2981 /*
   2982  * wm_watchdog:		[ifnet interface function]
   2983  *
   2984  *	Watchdog timer handler.
   2985  */
   2986 static void
   2987 wm_watchdog(struct ifnet *ifp)
   2988 {
   2989 	int qid;
   2990 	struct wm_softc *sc = ifp->if_softc;
    2991 	uint16_t hang_queue = 0; /* wm(4) has at most 16 queues (82576), so 16 bits suffice */
   2992 
   2993 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2994 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2995 
   2996 		wm_watchdog_txq(ifp, txq, &hang_queue);
   2997 	}
   2998 
   2999 	/*
    3000 	 * If any queue has hung, reset the interface.
   3001 	 */
   3002 	if (hang_queue != 0) {
   3003 		(void) wm_init(ifp);
   3004 
   3005 		/*
    3006 		 * Some upper layer processing, e.g. ALTQ or a single-CPU
    3007 		 * system, may still call ifp->if_start() directly.
   3008 		 */
   3009 		/* Try to get more packets going. */
   3010 		ifp->if_start(ifp);
   3011 	}
   3012 }
   3013 
   3014 
   3015 static void
   3016 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3017 {
   3018 
   3019 	mutex_enter(txq->txq_lock);
   3020 	if (txq->txq_sending &&
   3021 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3022 		wm_watchdog_txq_locked(ifp, txq, hang);
   3023 	}
   3024 	mutex_exit(txq->txq_lock);
   3025 }
   3026 
   3027 static void
   3028 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3029     uint16_t *hang)
   3030 {
   3031 	struct wm_softc *sc = ifp->if_softc;
   3032 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3033 
   3034 	KASSERT(mutex_owned(txq->txq_lock));
   3035 
   3036 	/*
   3037 	 * Since we're using delayed interrupts, sweep up
   3038 	 * before we report an error.
   3039 	 */
   3040 	wm_txeof(txq, UINT_MAX);
   3041 
   3042 	if (txq->txq_sending)
   3043 		*hang |= __BIT(wmq->wmq_id);
   3044 
   3045 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3046 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3047 		    device_xname(sc->sc_dev));
   3048 	} else {
   3049 #ifdef WM_DEBUG
   3050 		int i, j;
   3051 		struct wm_txsoft *txs;
   3052 #endif
   3053 		log(LOG_ERR,
   3054 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3055 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3056 		    txq->txq_next);
   3057 		ifp->if_oerrors++;
   3058 #ifdef WM_DEBUG
   3059 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3060 		    i = WM_NEXTTXS(txq, i)) {
   3061 		    txs = &txq->txq_soft[i];
   3062 		    printf("txs %d tx %d -> %d\n",
   3063 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3064 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3065 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3066 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3067 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3068 				    printf("\t %#08x%08x\n",
   3069 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3070 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3071 			    } else {
   3072 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3073 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3074 					txq->txq_descs[j].wtx_addr.wa_low);
   3075 				    printf("\t %#04x%02x%02x%08x\n",
   3076 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3077 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3078 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3079 					txq->txq_descs[j].wtx_cmdlen);
   3080 			    }
   3081 			if (j == txs->txs_lastdesc)
   3082 				break;
   3083 			}
   3084 		}
   3085 #endif
   3086 	}
   3087 }
   3088 
   3089 /*
   3090  * wm_tick:
   3091  *
   3092  *	One second timer, used to check link status, sweep up
   3093  *	completed transmit jobs, etc.
   3094  */
   3095 static void
   3096 wm_tick(void *arg)
   3097 {
   3098 	struct wm_softc *sc = arg;
   3099 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3100 #ifndef WM_MPSAFE
   3101 	int s = splnet();
   3102 #endif
   3103 
   3104 	WM_CORE_LOCK(sc);
   3105 
   3106 	if (sc->sc_core_stopping) {
   3107 		WM_CORE_UNLOCK(sc);
   3108 #ifndef WM_MPSAFE
   3109 		splx(s);
   3110 #endif
   3111 		return;
   3112 	}
   3113 
   3114 	if (sc->sc_type >= WM_T_82542_2_1) {
   3115 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3116 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3117 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3118 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3119 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3120 	}
   3121 
   3122 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3123 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3124 	    + CSR_READ(sc, WMREG_CRCERRS)
   3125 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3126 	    + CSR_READ(sc, WMREG_SYMERRC)
   3127 	    + CSR_READ(sc, WMREG_RXERRC)
   3128 	    + CSR_READ(sc, WMREG_SEC)
   3129 	    + CSR_READ(sc, WMREG_CEXTERR)
   3130 	    + CSR_READ(sc, WMREG_RLEC);
   3131 	/*
    3132 	 * WMREG_RNBC is incremented when no receive buffers are available
    3133 	 * in host memory. It is not a count of dropped packets, because the
    3134 	 * Ethernet controller can still receive packets in that case as
    3135 	 * long as there is space in the PHY's FIFO.
    3136 	 *
    3137 	 * To track WMREG_RNBC, use a dedicated EVCNT rather than
    3138 	 * if_iqdrops.
   3139 	 */
   3140 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3141 
   3142 	if (sc->sc_flags & WM_F_HAS_MII)
   3143 		mii_tick(&sc->sc_mii);
   3144 	else if ((sc->sc_type >= WM_T_82575)
   3145 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3146 		wm_serdes_tick(sc);
   3147 	else
   3148 		wm_tbi_tick(sc);
   3149 
   3150 	WM_CORE_UNLOCK(sc);
   3151 
   3152 	wm_watchdog(ifp);
   3153 
   3154 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3155 }
   3156 
   3157 static int
   3158 wm_ifflags_cb(struct ethercom *ec)
   3159 {
   3160 	struct ifnet *ifp = &ec->ec_if;
   3161 	struct wm_softc *sc = ifp->if_softc;
   3162 	int rc = 0;
   3163 
   3164 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3165 		device_xname(sc->sc_dev), __func__));
   3166 
   3167 	WM_CORE_LOCK(sc);
   3168 
   3169 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3170 	sc->sc_if_flags = ifp->if_flags;
   3171 
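	/*
	 * Any changed flag outside IFF_CANTCHANGE and IFF_DEBUG requires
	 * a full re-initialization, signalled by returning ENETRESET.
	 */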
   3172 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3173 		rc = ENETRESET;
   3174 		goto out;
   3175 	}
   3176 
   3177 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3178 		wm_set_filter(sc);
   3179 
   3180 	wm_set_vlan(sc);
   3181 
   3182 out:
   3183 	WM_CORE_UNLOCK(sc);
   3184 
   3185 	return rc;
   3186 }
   3187 
   3188 /*
   3189  * wm_ioctl:		[ifnet interface function]
   3190  *
   3191  *	Handle control requests from the operator.
   3192  */
   3193 static int
   3194 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3195 {
   3196 	struct wm_softc *sc = ifp->if_softc;
   3197 	struct ifreq *ifr = (struct ifreq *) data;
   3198 	struct ifaddr *ifa = (struct ifaddr *)data;
   3199 	struct sockaddr_dl *sdl;
   3200 	int s, error;
   3201 
   3202 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3203 		device_xname(sc->sc_dev), __func__));
   3204 
   3205 #ifndef WM_MPSAFE
   3206 	s = splnet();
   3207 #endif
   3208 	switch (cmd) {
   3209 	case SIOCSIFMEDIA:
   3210 	case SIOCGIFMEDIA:
   3211 		WM_CORE_LOCK(sc);
   3212 		/* Flow control requires full-duplex mode. */
   3213 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3214 		    (ifr->ifr_media & IFM_FDX) == 0)
   3215 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3216 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3217 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3218 				/* We can do both TXPAUSE and RXPAUSE. */
   3219 				ifr->ifr_media |=
   3220 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3221 			}
   3222 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3223 		}
   3224 		WM_CORE_UNLOCK(sc);
   3225 #ifdef WM_MPSAFE
   3226 		s = splnet();
   3227 #endif
   3228 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3229 #ifdef WM_MPSAFE
   3230 		splx(s);
   3231 #endif
   3232 		break;
   3233 	case SIOCINITIFADDR:
   3234 		WM_CORE_LOCK(sc);
   3235 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3236 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3237 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3238 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3239 			/* unicast address is first multicast entry */
   3240 			wm_set_filter(sc);
   3241 			error = 0;
   3242 			WM_CORE_UNLOCK(sc);
   3243 			break;
   3244 		}
   3245 		WM_CORE_UNLOCK(sc);
   3246 		/*FALLTHROUGH*/
   3247 	default:
   3248 #ifdef WM_MPSAFE
   3249 		s = splnet();
   3250 #endif
   3251 		/* It may call wm_start, so unlock here */
   3252 		error = ether_ioctl(ifp, cmd, data);
   3253 #ifdef WM_MPSAFE
   3254 		splx(s);
   3255 #endif
   3256 		if (error != ENETRESET)
   3257 			break;
   3258 
   3259 		error = 0;
   3260 
   3261 		if (cmd == SIOCSIFCAP) {
   3262 			error = (*ifp->if_init)(ifp);
   3263 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3264 			;
   3265 		else if (ifp->if_flags & IFF_RUNNING) {
   3266 			/*
   3267 			 * Multicast list has changed; set the hardware filter
   3268 			 * accordingly.
   3269 			 */
   3270 			WM_CORE_LOCK(sc);
   3271 			wm_set_filter(sc);
   3272 			WM_CORE_UNLOCK(sc);
   3273 		}
   3274 		break;
   3275 	}
   3276 
   3277 #ifndef WM_MPSAFE
   3278 	splx(s);
   3279 #endif
   3280 	return error;
   3281 }
   3282 
   3283 /* MAC address related */
   3284 
   3285 /*
    3286  * Get the offset of the MAC address and return it.
    3287  * If an error occurs, use offset 0.
   3288  */
   3289 static uint16_t
   3290 wm_check_alt_mac_addr(struct wm_softc *sc)
   3291 {
   3292 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3293 	uint16_t offset = NVM_OFF_MACADDR;
   3294 
   3295 	/* Try to read alternative MAC address pointer */
   3296 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3297 		return 0;
   3298 
    3299 	/* Check whether the pointer is valid. */
   3300 	if ((offset == 0x0000) || (offset == 0xffff))
   3301 		return 0;
   3302 
   3303 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3304 	/*
    3305 	 * Check whether the alternative MAC address is valid.
    3306 	 * Some cards have a non-0xffff pointer but don't actually
    3307 	 * use an alternative MAC address.
    3308 	 *
    3309 	 * The address is used only if its multicast (group) bit is clear.
   3310 	 */
   3311 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3312 		if (((myea[0] & 0xff) & 0x01) == 0)
   3313 			return offset; /* Found */
   3314 
   3315 	/* Not found */
   3316 	return 0;
   3317 }
   3318 
   3319 static int
   3320 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3321 {
   3322 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3323 	uint16_t offset = NVM_OFF_MACADDR;
   3324 	int do_invert = 0;
   3325 
   3326 	switch (sc->sc_type) {
   3327 	case WM_T_82580:
   3328 	case WM_T_I350:
   3329 	case WM_T_I354:
   3330 		/* EEPROM Top Level Partitioning */
   3331 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3332 		break;
   3333 	case WM_T_82571:
   3334 	case WM_T_82575:
   3335 	case WM_T_82576:
   3336 	case WM_T_80003:
   3337 	case WM_T_I210:
   3338 	case WM_T_I211:
   3339 		offset = wm_check_alt_mac_addr(sc);
   3340 		if (offset == 0)
   3341 			if ((sc->sc_funcid & 0x01) == 1)
   3342 				do_invert = 1;
   3343 		break;
   3344 	default:
   3345 		if ((sc->sc_funcid & 0x01) == 1)
   3346 			do_invert = 1;
   3347 		break;
   3348 	}
   3349 
   3350 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3351 		goto bad;
   3352 
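	/*
	 * The NVM stores the MAC address as three little-endian 16-bit
	 * words; unpack them into the six enaddr bytes.
	 */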
   3353 	enaddr[0] = myea[0] & 0xff;
   3354 	enaddr[1] = myea[0] >> 8;
   3355 	enaddr[2] = myea[1] & 0xff;
   3356 	enaddr[3] = myea[1] >> 8;
   3357 	enaddr[4] = myea[2] & 0xff;
   3358 	enaddr[5] = myea[2] >> 8;
   3359 
   3360 	/*
   3361 	 * Toggle the LSB of the MAC address on the second port
   3362 	 * of some dual port cards.
   3363 	 */
   3364 	if (do_invert != 0)
   3365 		enaddr[5] ^= 1;
   3366 
   3367 	return 0;
   3368 
   3369  bad:
   3370 	return -1;
   3371 }
   3372 
   3373 /*
   3374  * wm_set_ral:
   3375  *
    3376  *	Set an entry in the receive address list.
   3377  */
   3378 static void
   3379 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3380 {
   3381 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3382 	uint32_t wlock_mac;
   3383 	int rv;
   3384 
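	/*
	 * RAL holds the low 32 bits of the address and RAH the high 16
	 * bits; RAL_AV marks the entry valid. A NULL enaddr clears the
	 * slot.
	 */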
   3385 	if (enaddr != NULL) {
   3386 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3387 		    (enaddr[3] << 24);
   3388 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3389 		ral_hi |= RAL_AV;
   3390 	} else {
   3391 		ral_lo = 0;
   3392 		ral_hi = 0;
   3393 	}
   3394 
   3395 	switch (sc->sc_type) {
   3396 	case WM_T_82542_2_0:
   3397 	case WM_T_82542_2_1:
   3398 	case WM_T_82543:
   3399 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3400 		CSR_WRITE_FLUSH(sc);
   3401 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3402 		CSR_WRITE_FLUSH(sc);
   3403 		break;
   3404 	case WM_T_PCH2:
   3405 	case WM_T_PCH_LPT:
   3406 	case WM_T_PCH_SPT:
   3407 	case WM_T_PCH_CNP:
   3408 		if (idx == 0) {
   3409 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3410 			CSR_WRITE_FLUSH(sc);
   3411 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3412 			CSR_WRITE_FLUSH(sc);
   3413 			return;
   3414 		}
   3415 		if (sc->sc_type != WM_T_PCH2) {
   3416 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3417 			    FWSM_WLOCK_MAC);
   3418 			addrl = WMREG_SHRAL(idx - 1);
   3419 			addrh = WMREG_SHRAH(idx - 1);
   3420 		} else {
   3421 			wlock_mac = 0;
   3422 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3423 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3424 		}
   3425 
   3426 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3427 			rv = wm_get_swflag_ich8lan(sc);
   3428 			if (rv != 0)
   3429 				return;
   3430 			CSR_WRITE(sc, addrl, ral_lo);
   3431 			CSR_WRITE_FLUSH(sc);
   3432 			CSR_WRITE(sc, addrh, ral_hi);
   3433 			CSR_WRITE_FLUSH(sc);
   3434 			wm_put_swflag_ich8lan(sc);
   3435 		}
   3436 
   3437 		break;
   3438 	default:
   3439 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3440 		CSR_WRITE_FLUSH(sc);
   3441 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3442 		CSR_WRITE_FLUSH(sc);
   3443 		break;
   3444 	}
   3445 }
   3446 
   3447 /*
   3448  * wm_mchash:
   3449  *
    3450  *	Compute the hash of the multicast address for the 4096-bit
    3451  *	multicast filter (1024-bit on ICH/PCH variants).
   3452  */
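/*
 * Worked example (non-ICH parts, sc_mchash_type 0): for an address whose
 * last two bytes are 0x12 and 0x34, hash = (0x12 >> 4) | (0x34 << 4) =
 * 0x341, which selects bit 1 (0x341 & 0x1f) of MTA word 0x1a (0x341 >> 5)
 * in wm_set_filter() below.
 */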
   3453 static uint32_t
   3454 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3455 {
   3456 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3457 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3458 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3459 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3460 	uint32_t hash;
   3461 
   3462 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3463 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3464 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3465 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3466 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3467 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3468 		return (hash & 0x3ff);
   3469 	}
   3470 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3471 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3472 
   3473 	return (hash & 0xfff);
   3474 }
   3475 
   3476 /*
   3477  * wm_set_filter:
   3478  *
   3479  *	Set up the receive filter.
   3480  */
   3481 static void
   3482 wm_set_filter(struct wm_softc *sc)
   3483 {
   3484 	struct ethercom *ec = &sc->sc_ethercom;
   3485 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3486 	struct ether_multi *enm;
   3487 	struct ether_multistep step;
   3488 	bus_addr_t mta_reg;
   3489 	uint32_t hash, reg, bit;
   3490 	int i, size, ralmax;
   3491 
   3492 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3493 		device_xname(sc->sc_dev), __func__));
   3494 
   3495 	if (sc->sc_type >= WM_T_82544)
   3496 		mta_reg = WMREG_CORDOVA_MTA;
   3497 	else
   3498 		mta_reg = WMREG_MTA;
   3499 
   3500 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3501 
   3502 	if (ifp->if_flags & IFF_BROADCAST)
   3503 		sc->sc_rctl |= RCTL_BAM;
   3504 	if (ifp->if_flags & IFF_PROMISC) {
   3505 		sc->sc_rctl |= RCTL_UPE;
   3506 		goto allmulti;
   3507 	}
   3508 
   3509 	/*
   3510 	 * Set the station address in the first RAL slot, and
   3511 	 * clear the remaining slots.
   3512 	 */
   3513 	if (sc->sc_type == WM_T_ICH8)
    3514 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3515 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3516 	    || (sc->sc_type == WM_T_PCH))
   3517 		size = WM_RAL_TABSIZE_ICH8;
   3518 	else if (sc->sc_type == WM_T_PCH2)
   3519 		size = WM_RAL_TABSIZE_PCH2;
   3520 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3521 	    || (sc->sc_type == WM_T_PCH_CNP))
   3522 		size = WM_RAL_TABSIZE_PCH_LPT;
   3523 	else if (sc->sc_type == WM_T_82575)
   3524 		size = WM_RAL_TABSIZE_82575;
   3525 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3526 		size = WM_RAL_TABSIZE_82576;
   3527 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3528 		size = WM_RAL_TABSIZE_I350;
   3529 	else
   3530 		size = WM_RAL_TABSIZE;
   3531 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3532 
   3533 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3534 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3535 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3536 		switch (i) {
   3537 		case 0:
   3538 			/* We can use all entries */
   3539 			ralmax = size;
   3540 			break;
   3541 		case 1:
   3542 			/* Only RAR[0] */
   3543 			ralmax = 1;
   3544 			break;
   3545 		default:
    3546 			/* Available SHRA entries + RAR[0] */
   3547 			ralmax = i + 1;
   3548 		}
   3549 	} else
   3550 		ralmax = size;
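	/*
	 * On PCH_LPT and newer, SHRA slots at or above ralmax appear to be
	 * firmware-locked (FWSM_WLOCK_MAC), so only slots below ralmax are
	 * cleared.
	 */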
   3551 	for (i = 1; i < size; i++) {
   3552 		if (i < ralmax)
   3553 			wm_set_ral(sc, NULL, i);
   3554 	}
   3555 
   3556 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3557 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3558 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3559 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3560 		size = WM_ICH8_MC_TABSIZE;
   3561 	else
   3562 		size = WM_MC_TABSIZE;
   3563 	/* Clear out the multicast table. */
   3564 	for (i = 0; i < size; i++) {
   3565 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3566 		CSR_WRITE_FLUSH(sc);
   3567 	}
   3568 
   3569 	ETHER_LOCK(ec);
   3570 	ETHER_FIRST_MULTI(step, ec, enm);
   3571 	while (enm != NULL) {
   3572 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3573 			ETHER_UNLOCK(ec);
   3574 			/*
   3575 			 * We must listen to a range of multicast addresses.
   3576 			 * For now, just accept all multicasts, rather than
   3577 			 * trying to set only those filter bits needed to match
   3578 			 * the range.  (At this time, the only use of address
   3579 			 * ranges is for IP multicast routing, for which the
   3580 			 * range is big enough to require all bits set.)
   3581 			 */
   3582 			goto allmulti;
   3583 		}
   3584 
   3585 		hash = wm_mchash(sc, enm->enm_addrlo);
   3586 
   3587 		reg = (hash >> 5);
   3588 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3589 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3590 		    || (sc->sc_type == WM_T_PCH2)
   3591 		    || (sc->sc_type == WM_T_PCH_LPT)
   3592 		    || (sc->sc_type == WM_T_PCH_SPT)
   3593 		    || (sc->sc_type == WM_T_PCH_CNP))
   3594 			reg &= 0x1f;
   3595 		else
   3596 			reg &= 0x7f;
   3597 		bit = hash & 0x1f;
   3598 
   3599 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3600 		hash |= 1U << bit;
   3601 
   3602 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3603 			/*
    3604 			 * 82544 Errata 9: Certain registers cannot be written
    3605 			 * with particular alignments in PCI-X bus operation
   3606 			 * (FCAH, MTA and VFTA).
   3607 			 */
   3608 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3609 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3610 			CSR_WRITE_FLUSH(sc);
   3611 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3612 			CSR_WRITE_FLUSH(sc);
   3613 		} else {
   3614 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3615 			CSR_WRITE_FLUSH(sc);
   3616 		}
   3617 
   3618 		ETHER_NEXT_MULTI(step, enm);
   3619 	}
   3620 	ETHER_UNLOCK(ec);
   3621 
   3622 	ifp->if_flags &= ~IFF_ALLMULTI;
   3623 	goto setit;
   3624 
   3625  allmulti:
   3626 	ifp->if_flags |= IFF_ALLMULTI;
   3627 	sc->sc_rctl |= RCTL_MPE;
   3628 
   3629  setit:
   3630 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3631 }
   3632 
   3633 /* Reset and init related */
   3634 
   3635 static void
   3636 wm_set_vlan(struct wm_softc *sc)
   3637 {
   3638 
   3639 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3640 		device_xname(sc->sc_dev), __func__));
   3641 
   3642 	/* Deal with VLAN enables. */
   3643 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3644 		sc->sc_ctrl |= CTRL_VME;
   3645 	else
   3646 		sc->sc_ctrl &= ~CTRL_VME;
   3647 
   3648 	/* Write the control registers. */
   3649 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3650 }
   3651 
   3652 static void
   3653 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3654 {
   3655 	uint32_t gcr;
   3656 	pcireg_t ctrl2;
   3657 
   3658 	gcr = CSR_READ(sc, WMREG_GCR);
   3659 
    3660 	/* Only take action if the timeout value is at its default of 0 */
   3661 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3662 		goto out;
   3663 
   3664 	if ((gcr & GCR_CAP_VER2) == 0) {
   3665 		gcr |= GCR_CMPL_TMOUT_10MS;
   3666 		goto out;
   3667 	}
   3668 
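	/*
	 * Capability version 2 devices set a 16ms completion timeout via
	 * the standard PCIe Device Control 2 register instead of GCR.
	 */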
   3669 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3670 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3671 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3672 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3673 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3674 
   3675 out:
   3676 	/* Disable completion timeout resend */
   3677 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3678 
   3679 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3680 }
   3681 
   3682 void
   3683 wm_get_auto_rd_done(struct wm_softc *sc)
   3684 {
   3685 	int i;
   3686 
   3687 	/* wait for eeprom to reload */
   3688 	switch (sc->sc_type) {
   3689 	case WM_T_82571:
   3690 	case WM_T_82572:
   3691 	case WM_T_82573:
   3692 	case WM_T_82574:
   3693 	case WM_T_82583:
   3694 	case WM_T_82575:
   3695 	case WM_T_82576:
   3696 	case WM_T_82580:
   3697 	case WM_T_I350:
   3698 	case WM_T_I354:
   3699 	case WM_T_I210:
   3700 	case WM_T_I211:
   3701 	case WM_T_80003:
   3702 	case WM_T_ICH8:
   3703 	case WM_T_ICH9:
   3704 		for (i = 0; i < 10; i++) {
   3705 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3706 				break;
   3707 			delay(1000);
   3708 		}
   3709 		if (i == 10) {
   3710 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3711 			    "complete\n", device_xname(sc->sc_dev));
   3712 		}
   3713 		break;
   3714 	default:
   3715 		break;
   3716 	}
   3717 }
   3718 
   3719 void
   3720 wm_lan_init_done(struct wm_softc *sc)
   3721 {
   3722 	uint32_t reg = 0;
   3723 	int i;
   3724 
   3725 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3726 		device_xname(sc->sc_dev), __func__));
   3727 
   3728 	/* Wait for eeprom to reload */
   3729 	switch (sc->sc_type) {
   3730 	case WM_T_ICH10:
   3731 	case WM_T_PCH:
   3732 	case WM_T_PCH2:
   3733 	case WM_T_PCH_LPT:
   3734 	case WM_T_PCH_SPT:
   3735 	case WM_T_PCH_CNP:
   3736 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3737 			reg = CSR_READ(sc, WMREG_STATUS);
   3738 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3739 				break;
   3740 			delay(100);
   3741 		}
   3742 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3743 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3744 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3745 		}
   3746 		break;
   3747 	default:
   3748 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3749 		    __func__);
   3750 		break;
   3751 	}
   3752 
   3753 	reg &= ~STATUS_LAN_INIT_DONE;
   3754 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3755 }
   3756 
   3757 void
   3758 wm_get_cfg_done(struct wm_softc *sc)
   3759 {
   3760 	int mask;
   3761 	uint32_t reg;
   3762 	int i;
   3763 
   3764 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3765 		device_xname(sc->sc_dev), __func__));
   3766 
   3767 	/* Wait for eeprom to reload */
   3768 	switch (sc->sc_type) {
   3769 	case WM_T_82542_2_0:
   3770 	case WM_T_82542_2_1:
   3771 		/* null */
   3772 		break;
   3773 	case WM_T_82543:
   3774 	case WM_T_82544:
   3775 	case WM_T_82540:
   3776 	case WM_T_82545:
   3777 	case WM_T_82545_3:
   3778 	case WM_T_82546:
   3779 	case WM_T_82546_3:
   3780 	case WM_T_82541:
   3781 	case WM_T_82541_2:
   3782 	case WM_T_82547:
   3783 	case WM_T_82547_2:
   3784 	case WM_T_82573:
   3785 	case WM_T_82574:
   3786 	case WM_T_82583:
   3787 		/* generic */
   3788 		delay(10*1000);
   3789 		break;
   3790 	case WM_T_80003:
   3791 	case WM_T_82571:
   3792 	case WM_T_82572:
   3793 	case WM_T_82575:
   3794 	case WM_T_82576:
   3795 	case WM_T_82580:
   3796 	case WM_T_I350:
   3797 	case WM_T_I354:
   3798 	case WM_T_I210:
   3799 	case WM_T_I211:
   3800 		if (sc->sc_type == WM_T_82571) {
    3801 			/* On 82571, all functions share the port 0 CFGDONE bit */
   3802 			mask = EEMNGCTL_CFGDONE_0;
   3803 		} else
   3804 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3805 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3806 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3807 				break;
   3808 			delay(1000);
   3809 		}
   3810 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3811 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3812 				device_xname(sc->sc_dev), __func__));
   3813 		}
   3814 		break;
   3815 	case WM_T_ICH8:
   3816 	case WM_T_ICH9:
   3817 	case WM_T_ICH10:
   3818 	case WM_T_PCH:
   3819 	case WM_T_PCH2:
   3820 	case WM_T_PCH_LPT:
   3821 	case WM_T_PCH_SPT:
   3822 	case WM_T_PCH_CNP:
   3823 		delay(10*1000);
   3824 		if (sc->sc_type >= WM_T_ICH10)
   3825 			wm_lan_init_done(sc);
   3826 		else
   3827 			wm_get_auto_rd_done(sc);
   3828 
   3829 		reg = CSR_READ(sc, WMREG_STATUS);
   3830 		if ((reg & STATUS_PHYRA) != 0)
   3831 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3832 		break;
   3833 	default:
   3834 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3835 		    __func__);
   3836 		break;
   3837 	}
   3838 }
   3839 
   3840 void
   3841 wm_phy_post_reset(struct wm_softc *sc)
   3842 {
   3843 	uint32_t reg;
   3844 
   3845 	/* This function is only for ICH8 and newer. */
   3846 	if (sc->sc_type < WM_T_ICH8)
   3847 		return;
   3848 
   3849 	if (wm_phy_resetisblocked(sc)) {
   3850 		/* XXX */
   3851 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3852 		return;
   3853 	}
   3854 
   3855 	/* Allow time for h/w to get to quiescent state after reset */
   3856 	delay(10*1000);
   3857 
   3858 	/* Perform any necessary post-reset workarounds */
   3859 	if (sc->sc_type == WM_T_PCH)
   3860 		wm_hv_phy_workaround_ich8lan(sc);
   3861 	if (sc->sc_type == WM_T_PCH2)
   3862 		wm_lv_phy_workaround_ich8lan(sc);
   3863 
    3864 	/* Clear the host wakeup bit after LCD reset */
   3865 	if (sc->sc_type >= WM_T_PCH) {
   3866 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3867 		    BM_PORT_GEN_CFG);
   3868 		reg &= ~BM_WUC_HOST_WU_BIT;
   3869 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3870 		    BM_PORT_GEN_CFG, reg);
   3871 	}
   3872 
   3873 	/* Configure the LCD with the extended configuration region in NVM */
   3874 	wm_init_lcd_from_nvm(sc);
   3875 
   3876 	/* Configure the LCD with the OEM bits in NVM */
   3877 }
   3878 
   3879 /* Only for PCH and newer */
   3880 static void
   3881 wm_write_smbus_addr(struct wm_softc *sc)
   3882 {
   3883 	uint32_t strap, freq;
   3884 	uint32_t phy_data;
   3885 
   3886 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3887 		device_xname(sc->sc_dev), __func__));
   3888 
   3889 	strap = CSR_READ(sc, WMREG_STRAP);
   3890 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3891 
   3892 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3893 
   3894 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3895 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3896 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3897 
   3898 	if (sc->sc_phytype == WMPHY_I217) {
   3899 		/* Restore SMBus frequency */
    3900 		if (freq--) {
   3901 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3902 			    | HV_SMB_ADDR_FREQ_HIGH);
   3903 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3904 			    HV_SMB_ADDR_FREQ_LOW);
   3905 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3906 			    HV_SMB_ADDR_FREQ_HIGH);
   3907 		} else {
   3908 			DPRINTF(WM_DEBUG_INIT,
   3909 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3910 				device_xname(sc->sc_dev), __func__));
   3911 		}
   3912 	}
   3913 
   3914 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3915 }
   3916 
   3917 void
   3918 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3919 {
   3920 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3921 	uint16_t phy_page = 0;
   3922 
   3923 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3924 		device_xname(sc->sc_dev), __func__));
   3925 
   3926 	switch (sc->sc_type) {
   3927 	case WM_T_ICH8:
   3928 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3929 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3930 			return;
   3931 
   3932 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3933 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3934 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3935 			break;
   3936 		}
   3937 		/* FALLTHROUGH */
   3938 	case WM_T_PCH:
   3939 	case WM_T_PCH2:
   3940 	case WM_T_PCH_LPT:
   3941 	case WM_T_PCH_SPT:
   3942 	case WM_T_PCH_CNP:
   3943 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3944 		break;
   3945 	default:
   3946 		return;
   3947 	}
   3948 
   3949 	sc->phy.acquire(sc);
   3950 
   3951 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3952 	if ((reg & sw_cfg_mask) == 0)
   3953 		goto release;
   3954 
   3955 	/*
   3956 	 * Make sure HW does not configure LCD from PHY extended configuration
   3957 	 * before SW configuration
   3958 	 */
   3959 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3960 	if ((sc->sc_type < WM_T_PCH2)
   3961 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3962 		goto release;
   3963 
   3964 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3965 		device_xname(sc->sc_dev), __func__));
    3966 	/* The pointer is in DWORDs; << 1 converts it to an NVM word address */
   3967 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3968 
   3969 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3970 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3971 	if (cnf_size == 0)
   3972 		goto release;
   3973 
   3974 	if (((sc->sc_type == WM_T_PCH)
   3975 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3976 	    || (sc->sc_type > WM_T_PCH)) {
   3977 		/*
   3978 		 * HW configures the SMBus address and LEDs when the OEM and
   3979 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3980 		 * are cleared, SW will configure them instead.
   3981 		 */
   3982 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3983 			device_xname(sc->sc_dev), __func__));
   3984 		wm_write_smbus_addr(sc);
   3985 
   3986 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3987 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3988 	}
   3989 
   3990 	/* Configure LCD from extended configuration region. */
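	/* Each entry in the region is a (data word, register address) pair. */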
   3991 	for (i = 0; i < cnf_size; i++) {
   3992 		uint16_t reg_data, reg_addr;
   3993 
   3994 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3995 			goto release;
   3996 
   3997 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3998 			goto release;
   3999 
   4000 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4001 			phy_page = reg_data;
   4002 
   4003 		reg_addr &= IGPHY_MAXREGADDR;
   4004 		reg_addr |= phy_page;
   4005 
   4006 		sc->phy.release(sc); /* XXX */
   4007 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   4008 		sc->phy.acquire(sc); /* XXX */
   4009 	}
   4010 
   4011 release:
   4012 	sc->phy.release(sc);
   4013 	return;
   4014 }
   4015 
   4016 
   4017 /* Init hardware bits */
   4018 void
   4019 wm_initialize_hardware_bits(struct wm_softc *sc)
   4020 {
   4021 	uint32_t tarc0, tarc1, reg;
   4022 
   4023 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4024 		device_xname(sc->sc_dev), __func__));
   4025 
   4026 	/* For 82571 variant, 80003 and ICHs */
   4027 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4028 	    || (sc->sc_type >= WM_T_80003)) {
   4029 
   4030 		/* Transmit Descriptor Control 0 */
   4031 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4032 		reg |= TXDCTL_COUNT_DESC;
   4033 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4034 
   4035 		/* Transmit Descriptor Control 1 */
   4036 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4037 		reg |= TXDCTL_COUNT_DESC;
   4038 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4039 
   4040 		/* TARC0 */
   4041 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4042 		switch (sc->sc_type) {
   4043 		case WM_T_82571:
   4044 		case WM_T_82572:
   4045 		case WM_T_82573:
   4046 		case WM_T_82574:
   4047 		case WM_T_82583:
   4048 		case WM_T_80003:
   4049 			/* Clear bits 30..27 */
   4050 			tarc0 &= ~__BITS(30, 27);
   4051 			break;
   4052 		default:
   4053 			break;
   4054 		}
   4055 
   4056 		switch (sc->sc_type) {
   4057 		case WM_T_82571:
   4058 		case WM_T_82572:
   4059 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4060 
   4061 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4062 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4063 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4064 			/* 8257[12] Errata No.7 */
    4065 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4066 
   4067 			/* TARC1 bit 28 */
   4068 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4069 				tarc1 &= ~__BIT(28);
   4070 			else
   4071 				tarc1 |= __BIT(28);
   4072 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4073 
   4074 			/*
   4075 			 * 8257[12] Errata No.13
    4076 			 * Disable Dynamic Clock Gating.
   4077 			 */
   4078 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4079 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4080 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4081 			break;
   4082 		case WM_T_82573:
   4083 		case WM_T_82574:
   4084 		case WM_T_82583:
   4085 			if ((sc->sc_type == WM_T_82574)
   4086 			    || (sc->sc_type == WM_T_82583))
   4087 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4088 
   4089 			/* Extended Device Control */
   4090 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4091 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4092 			reg |= __BIT(22);	/* Set bit 22 */
   4093 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4094 
   4095 			/* Device Control */
   4096 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4097 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4098 
   4099 			/* PCIe Control Register */
   4100 			/*
   4101 			 * 82573 Errata (unknown).
   4102 			 *
   4103 			 * 82574 Errata 25 and 82583 Errata 12
   4104 			 * "Dropped Rx Packets":
    4105 			 *   NVM image version 2.1.4 and newer does not have this bug.
   4106 			 */
   4107 			reg = CSR_READ(sc, WMREG_GCR);
   4108 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4109 			CSR_WRITE(sc, WMREG_GCR, reg);
   4110 
   4111 			if ((sc->sc_type == WM_T_82574)
   4112 			    || (sc->sc_type == WM_T_82583)) {
   4113 				/*
   4114 				 * Document says this bit must be set for
   4115 				 * proper operation.
   4116 				 */
   4117 				reg = CSR_READ(sc, WMREG_GCR);
   4118 				reg |= __BIT(22);
   4119 				CSR_WRITE(sc, WMREG_GCR, reg);
   4120 
   4121 				/*
    4122 				 * Apply a workaround for a hardware erratum
    4123 				 * documented in the errata docs. It fixes an
    4124 				 * issue where unreliable PCIe completions
    4125 				 * occur, particularly with ASPM enabled.
    4126 				 * Without the fix, the issue can cause Tx
    4127 				 * timeouts.
   4128 				 */
   4129 				reg = CSR_READ(sc, WMREG_GCR2);
   4130 				reg |= __BIT(0);
   4131 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4132 			}
   4133 			break;
   4134 		case WM_T_80003:
   4135 			/* TARC0 */
   4136 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4137 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4138 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4139 
   4140 			/* TARC1 bit 28 */
   4141 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4142 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4143 				tarc1 &= ~__BIT(28);
   4144 			else
   4145 				tarc1 |= __BIT(28);
   4146 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4147 			break;
   4148 		case WM_T_ICH8:
   4149 		case WM_T_ICH9:
   4150 		case WM_T_ICH10:
   4151 		case WM_T_PCH:
   4152 		case WM_T_PCH2:
   4153 		case WM_T_PCH_LPT:
   4154 		case WM_T_PCH_SPT:
   4155 		case WM_T_PCH_CNP:
   4156 			/* TARC0 */
   4157 			if (sc->sc_type == WM_T_ICH8) {
   4158 				/* Set TARC0 bits 29 and 28 */
   4159 				tarc0 |= __BITS(29, 28);
   4160 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4161 				tarc0 |= __BIT(29);
   4162 				/*
   4163 				 *  Drop bit 28. From Linux.
   4164 				 * See I218/I219 spec update
   4165 				 * "5. Buffer Overrun While the I219 is
   4166 				 * Processing DMA Transactions"
   4167 				 */
   4168 				tarc0 &= ~__BIT(28);
   4169 			}
   4170 			/* Set TARC0 bits 23,24,26,27 */
   4171 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4172 
   4173 			/* CTRL_EXT */
   4174 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4175 			reg |= __BIT(22);	/* Set bit 22 */
   4176 			/*
   4177 			 * Enable PHY low-power state when MAC is at D3
   4178 			 * w/o WoL
   4179 			 */
   4180 			if (sc->sc_type >= WM_T_PCH)
   4181 				reg |= CTRL_EXT_PHYPDEN;
   4182 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4183 
   4184 			/* TARC1 */
   4185 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4186 			/* bit 28 */
   4187 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4188 				tarc1 &= ~__BIT(28);
   4189 			else
   4190 				tarc1 |= __BIT(28);
   4191 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4192 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4193 
   4194 			/* Device Status */
   4195 			if (sc->sc_type == WM_T_ICH8) {
   4196 				reg = CSR_READ(sc, WMREG_STATUS);
   4197 				reg &= ~__BIT(31);
   4198 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4199 
   4200 			}
   4201 
   4202 			/* IOSFPC */
   4203 			if (sc->sc_type == WM_T_PCH_SPT) {
   4204 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4205 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4206 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4207 			}
   4208 			/*
    4209 			 * To work around a descriptor data corruption issue
    4210 			 * during NFSv2 UDP traffic, simply disable the NFS
    4211 			 * filtering capability.
   4212 			 */
   4213 			reg = CSR_READ(sc, WMREG_RFCTL);
   4214 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4215 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4216 			break;
   4217 		default:
   4218 			break;
   4219 		}
   4220 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4221 
   4222 		switch (sc->sc_type) {
   4223 		/*
   4224 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4225 		 * Avoid RSS Hash Value bug.
   4226 		 */
   4227 		case WM_T_82571:
   4228 		case WM_T_82572:
   4229 		case WM_T_82573:
   4230 		case WM_T_80003:
   4231 		case WM_T_ICH8:
   4232 			reg = CSR_READ(sc, WMREG_RFCTL);
   4233 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4234 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4235 			break;
   4236 		case WM_T_82574:
    4237 			/* Use the extended Rx descriptor format. */
   4238 			reg = CSR_READ(sc, WMREG_RFCTL);
   4239 			reg |= WMREG_RFCTL_EXSTEN;
   4240 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4241 			break;
   4242 		default:
   4243 			break;
   4244 		}
   4245 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4246 		/*
   4247 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4248 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4249 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4250 		 * Correctly by the Device"
   4251 		 *
   4252 		 * I354(C2000) Errata AVR53:
   4253 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4254 		 * Hang"
   4255 		 */
   4256 		reg = CSR_READ(sc, WMREG_RFCTL);
   4257 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4258 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4259 	}
   4260 }
   4261 
   4262 static uint32_t
   4263 wm_rxpbs_adjust_82580(uint32_t val)
   4264 {
   4265 	uint32_t rv = 0;
   4266 
   4267 	if (val < __arraycount(wm_82580_rxpbs_table))
   4268 		rv = wm_82580_rxpbs_table[val];
   4269 
   4270 	return rv;
   4271 }
   4272 
   4273 /*
   4274  * wm_reset_phy:
   4275  *
   4276  *	generic PHY reset function.
   4277  *	Same as e1000_phy_hw_reset_generic()
   4278  */
   4279 static void
   4280 wm_reset_phy(struct wm_softc *sc)
   4281 {
   4282 	uint32_t reg;
   4283 
   4284 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4285 		device_xname(sc->sc_dev), __func__));
   4286 	if (wm_phy_resetisblocked(sc))
   4287 		return;
   4288 
   4289 	sc->phy.acquire(sc);
   4290 
   4291 	reg = CSR_READ(sc, WMREG_CTRL);
   4292 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4293 	CSR_WRITE_FLUSH(sc);
   4294 
   4295 	delay(sc->phy.reset_delay_us);
   4296 
   4297 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4298 	CSR_WRITE_FLUSH(sc);
   4299 
   4300 	delay(150);
   4301 
   4302 	sc->phy.release(sc);
   4303 
   4304 	wm_get_cfg_done(sc);
   4305 	wm_phy_post_reset(sc);
   4306 }
   4307 
   4308 /*
   4309  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4310  * so it is enough to check sc->sc_queue[0] only.
   4311  */
   4312 static void
   4313 wm_flush_desc_rings(struct wm_softc *sc)
   4314 {
   4315 	pcireg_t preg;
   4316 	uint32_t reg;
   4317 	struct wm_txqueue *txq;
   4318 	wiseman_txdesc_t *txd;
   4319 	int nexttx;
   4320 	uint32_t rctl;
   4321 
   4322 	/* First, disable MULR fix in FEXTNVM11 */
   4323 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4324 	reg |= FEXTNVM11_DIS_MULRFIX;
   4325 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4326 
   4327 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4328 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4329 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4330 		return;
   4331 
   4332 	/* TX */
   4333 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4334 	    device_xname(sc->sc_dev), preg, reg);
   4335 	reg = CSR_READ(sc, WMREG_TCTL);
   4336 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4337 
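	/*
	 * Post a single dummy 512-byte descriptor (IFCS only) and advance
	 * the tail pointer so the hardware can drain its internal
	 * descriptor cache.
	 */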
   4338 	txq = &sc->sc_queue[0].wmq_txq;
   4339 	nexttx = txq->txq_next;
   4340 	txd = &txq->txq_descs[nexttx];
   4341 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4342 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4343 	txd->wtx_fields.wtxu_status = 0;
   4344 	txd->wtx_fields.wtxu_options = 0;
   4345 	txd->wtx_fields.wtxu_vlan = 0;
   4346 
   4347 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4348 	    BUS_SPACE_BARRIER_WRITE);
   4349 
   4350 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4351 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4352 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4353 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4354 	delay(250);
   4355 
   4356 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4357 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4358 		return;
   4359 
   4360 	/* RX */
   4361 	printf("%s: Need RX flush (reg = %08x)\n",
   4362 	    device_xname(sc->sc_dev), preg);
   4363 	rctl = CSR_READ(sc, WMREG_RCTL);
   4364 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4365 	CSR_WRITE_FLUSH(sc);
   4366 	delay(150);
   4367 
   4368 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4369 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4370 	reg &= 0xffffc000;
   4371 	/*
   4372 	 * update thresholds: prefetch threshold to 31, host threshold
   4373 	 * to 1 and make sure the granularity is "descriptors" and not
   4374 	 * "cache lines"
   4375 	 */
   4376 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4377 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4378 
   4379 	/*
   4380 	 * momentarily enable the RX ring for the changes to take
   4381 	 * effect
   4382 	 */
   4383 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4384 	CSR_WRITE_FLUSH(sc);
   4385 	delay(150);
   4386 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4387 }
   4388 
   4389 /*
   4390  * wm_reset:
   4391  *
   4392  *	Reset the i82542 chip.
   4393  */
   4394 static void
   4395 wm_reset(struct wm_softc *sc)
   4396 {
   4397 	int phy_reset = 0;
   4398 	int i, error = 0;
   4399 	uint32_t reg;
   4400 	uint16_t kmreg;
   4401 	int rv;
   4402 
   4403 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4404 		device_xname(sc->sc_dev), __func__));
   4405 	KASSERT(sc->sc_type != 0);
   4406 
   4407 	/*
   4408 	 * Allocate on-chip memory according to the MTU size.
   4409 	 * The Packet Buffer Allocation register must be written
   4410 	 * before the chip is reset.
   4411 	 */
   4412 	switch (sc->sc_type) {
   4413 	case WM_T_82547:
   4414 	case WM_T_82547_2:
   4415 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4416 		    PBA_22K : PBA_30K;
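		/*
		 * On 82547, the space above the Rx allocation within the
		 * 40KB packet buffer becomes the Tx FIFO workaround area
		 * (txq_fifo_addr/txq_fifo_size below).
		 */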
   4417 		for (i = 0; i < sc->sc_nqueues; i++) {
   4418 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4419 			txq->txq_fifo_head = 0;
   4420 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4421 			txq->txq_fifo_size =
   4422 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4423 			txq->txq_fifo_stall = 0;
   4424 		}
   4425 		break;
   4426 	case WM_T_82571:
   4427 	case WM_T_82572:
    4428 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4429 	case WM_T_80003:
   4430 		sc->sc_pba = PBA_32K;
   4431 		break;
   4432 	case WM_T_82573:
   4433 		sc->sc_pba = PBA_12K;
   4434 		break;
   4435 	case WM_T_82574:
   4436 	case WM_T_82583:
   4437 		sc->sc_pba = PBA_20K;
   4438 		break;
   4439 	case WM_T_82576:
   4440 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4441 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4442 		break;
   4443 	case WM_T_82580:
   4444 	case WM_T_I350:
   4445 	case WM_T_I354:
   4446 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4447 		break;
   4448 	case WM_T_I210:
   4449 	case WM_T_I211:
   4450 		sc->sc_pba = PBA_34K;
   4451 		break;
   4452 	case WM_T_ICH8:
   4453 		/* Workaround for a bit corruption issue in FIFO memory */
   4454 		sc->sc_pba = PBA_8K;
   4455 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4456 		break;
   4457 	case WM_T_ICH9:
   4458 	case WM_T_ICH10:
   4459 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4460 		    PBA_14K : PBA_10K;
   4461 		break;
   4462 	case WM_T_PCH:
   4463 	case WM_T_PCH2:	/* XXX 14K? */
   4464 	case WM_T_PCH_LPT:
   4465 	case WM_T_PCH_SPT:
   4466 	case WM_T_PCH_CNP:
   4467 		sc->sc_pba = PBA_26K;
   4468 		break;
   4469 	default:
   4470 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4471 		    PBA_40K : PBA_48K;
   4472 		break;
   4473 	}
   4474 	/*
   4475 	 * Only old or non-multiqueue devices have the PBA register
   4476 	 * XXX Need special handling for 82575.
   4477 	 */
   4478 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4479 	    || (sc->sc_type == WM_T_82575))
   4480 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4481 
   4482 	/* Prevent the PCI-E bus from sticking */
   4483 	if (sc->sc_flags & WM_F_PCIE) {
   4484 		int timeout = 800;
   4485 
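		/*
		 * Ask the device to stop issuing master (DMA) requests,
		 * then poll STATUS_GIO_M_ENA for up to 800 * 100us = 80ms.
		 */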
   4486 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4487 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4488 
   4489 		while (timeout--) {
   4490 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4491 			    == 0)
   4492 				break;
   4493 			delay(100);
   4494 		}
   4495 		if (timeout == 0)
   4496 			device_printf(sc->sc_dev,
   4497 			    "failed to disable busmastering\n");
   4498 	}
   4499 
   4500 	/* Set the completion timeout for interface */
   4501 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4502 	    || (sc->sc_type == WM_T_82580)
   4503 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4504 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4505 		wm_set_pcie_completion_timeout(sc);
   4506 
   4507 	/* Clear interrupt */
   4508 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4509 	if (wm_is_using_msix(sc)) {
   4510 		if (sc->sc_type != WM_T_82574) {
   4511 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4512 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4513 		} else {
   4514 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4515 		}
   4516 	}
   4517 
   4518 	/* Stop the transmit and receive processes. */
   4519 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4520 	sc->sc_rctl &= ~RCTL_EN;
   4521 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4522 	CSR_WRITE_FLUSH(sc);
   4523 
   4524 	/* XXX set_tbi_sbp_82543() */
   4525 
   4526 	delay(10*1000);
   4527 
   4528 	/* Must acquire the MDIO ownership before MAC reset */
   4529 	switch (sc->sc_type) {
   4530 	case WM_T_82573:
   4531 	case WM_T_82574:
   4532 	case WM_T_82583:
   4533 		error = wm_get_hw_semaphore_82573(sc);
   4534 		break;
   4535 	default:
   4536 		break;
   4537 	}
   4538 
   4539 	/*
   4540 	 * 82541 Errata 29? & 82547 Errata 28?
   4541 	 * See also the description about PHY_RST bit in CTRL register
   4542 	 * in 8254x_GBe_SDM.pdf.
   4543 	 */
   4544 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4545 		CSR_WRITE(sc, WMREG_CTRL,
   4546 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4547 		CSR_WRITE_FLUSH(sc);
   4548 		delay(5000);
   4549 	}
   4550 
   4551 	switch (sc->sc_type) {
   4552 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4553 	case WM_T_82541:
   4554 	case WM_T_82541_2:
   4555 	case WM_T_82547:
   4556 	case WM_T_82547_2:
   4557 		/*
   4558 		 * On some chipsets, a reset through a memory-mapped write
   4559 		 * cycle can cause the chip to reset before completing the
   4560 		 * write cycle. This causes major headache that can be avoided
   4561 		 * by issuing the reset via indirect register writes through
   4562 		 * I/O space.
   4563 		 *
   4564 		 * So, if we successfully mapped the I/O BAR at attach time,
   4565 		 * use that. Otherwise, try our luck with a memory-mapped
   4566 		 * reset.
   4567 		 */
   4568 		if (sc->sc_flags & WM_F_IOH_VALID)
   4569 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4570 		else
   4571 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4572 		break;
   4573 	case WM_T_82545_3:
   4574 	case WM_T_82546_3:
   4575 		/* Use the shadow control register on these chips. */
   4576 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4577 		break;
   4578 	case WM_T_80003:
   4579 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4580 		sc->phy.acquire(sc);
   4581 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4582 		sc->phy.release(sc);
   4583 		break;
   4584 	case WM_T_ICH8:
   4585 	case WM_T_ICH9:
   4586 	case WM_T_ICH10:
   4587 	case WM_T_PCH:
   4588 	case WM_T_PCH2:
   4589 	case WM_T_PCH_LPT:
   4590 	case WM_T_PCH_SPT:
   4591 	case WM_T_PCH_CNP:
   4592 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4593 		if (wm_phy_resetisblocked(sc) == false) {
   4594 			/*
   4595 			 * Gate automatic PHY configuration by hardware on
   4596 			 * non-managed 82579
   4597 			 */
   4598 			if ((sc->sc_type == WM_T_PCH2)
   4599 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4600 				== 0))
   4601 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4602 
   4603 			reg |= CTRL_PHY_RESET;
   4604 			phy_reset = 1;
   4605 		} else
   4606 			printf("XXX reset is blocked!!!\n");
   4607 		sc->phy.acquire(sc);
   4608 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4609 		/* Don't insert a completion barrier during reset */
   4610 		delay(20*1000);
   4611 		mutex_exit(sc->sc_ich_phymtx);
   4612 		break;
   4613 	case WM_T_82580:
   4614 	case WM_T_I350:
   4615 	case WM_T_I354:
   4616 	case WM_T_I210:
   4617 	case WM_T_I211:
   4618 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4619 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4620 			CSR_WRITE_FLUSH(sc);
   4621 		delay(5000);
   4622 		break;
   4623 	case WM_T_82542_2_0:
   4624 	case WM_T_82542_2_1:
   4625 	case WM_T_82543:
   4626 	case WM_T_82540:
   4627 	case WM_T_82545:
   4628 	case WM_T_82546:
   4629 	case WM_T_82571:
   4630 	case WM_T_82572:
   4631 	case WM_T_82573:
   4632 	case WM_T_82574:
   4633 	case WM_T_82575:
   4634 	case WM_T_82576:
   4635 	case WM_T_82583:
   4636 	default:
   4637 		/* Everything else can safely use the documented method. */
   4638 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4639 		break;
   4640 	}
   4641 
   4642 	/* Must release the MDIO ownership after MAC reset */
   4643 	switch (sc->sc_type) {
   4644 	case WM_T_82573:
   4645 	case WM_T_82574:
   4646 	case WM_T_82583:
   4647 		if (error == 0)
   4648 			wm_put_hw_semaphore_82573(sc);
   4649 		break;
   4650 	default:
   4651 		break;
   4652 	}
   4653 
   4654 	if (phy_reset != 0)
   4655 		wm_get_cfg_done(sc);
   4656 
   4657 	/* reload EEPROM */
   4658 	switch (sc->sc_type) {
   4659 	case WM_T_82542_2_0:
   4660 	case WM_T_82542_2_1:
   4661 	case WM_T_82543:
   4662 	case WM_T_82544:
   4663 		delay(10);
   4664 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4665 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4666 		CSR_WRITE_FLUSH(sc);
   4667 		delay(2000);
   4668 		break;
   4669 	case WM_T_82540:
   4670 	case WM_T_82545:
   4671 	case WM_T_82545_3:
   4672 	case WM_T_82546:
   4673 	case WM_T_82546_3:
   4674 		delay(5*1000);
   4675 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4676 		break;
   4677 	case WM_T_82541:
   4678 	case WM_T_82541_2:
   4679 	case WM_T_82547:
   4680 	case WM_T_82547_2:
   4681 		delay(20000);
   4682 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4683 		break;
   4684 	case WM_T_82571:
   4685 	case WM_T_82572:
   4686 	case WM_T_82573:
   4687 	case WM_T_82574:
   4688 	case WM_T_82583:
   4689 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4690 			delay(10);
   4691 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4692 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4693 			CSR_WRITE_FLUSH(sc);
   4694 		}
   4695 		/* check EECD_EE_AUTORD */
   4696 		wm_get_auto_rd_done(sc);
   4697 		/*
    4698 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   4699 		 * is set.
   4700 		 */
   4701 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4702 		    || (sc->sc_type == WM_T_82583))
   4703 			delay(25*1000);
   4704 		break;
   4705 	case WM_T_82575:
   4706 	case WM_T_82576:
   4707 	case WM_T_82580:
   4708 	case WM_T_I350:
   4709 	case WM_T_I354:
   4710 	case WM_T_I210:
   4711 	case WM_T_I211:
   4712 	case WM_T_80003:
   4713 		/* check EECD_EE_AUTORD */
   4714 		wm_get_auto_rd_done(sc);
   4715 		break;
   4716 	case WM_T_ICH8:
   4717 	case WM_T_ICH9:
   4718 	case WM_T_ICH10:
   4719 	case WM_T_PCH:
   4720 	case WM_T_PCH2:
   4721 	case WM_T_PCH_LPT:
   4722 	case WM_T_PCH_SPT:
   4723 	case WM_T_PCH_CNP:
   4724 		break;
   4725 	default:
   4726 		panic("%s: unknown type\n", __func__);
   4727 	}
   4728 
   4729 	/* Check whether EEPROM is present or not */
   4730 	switch (sc->sc_type) {
   4731 	case WM_T_82575:
   4732 	case WM_T_82576:
   4733 	case WM_T_82580:
   4734 	case WM_T_I350:
   4735 	case WM_T_I354:
   4736 	case WM_T_ICH8:
   4737 	case WM_T_ICH9:
   4738 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4739 			/* Not found */
   4740 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4741 			if (sc->sc_type == WM_T_82575)
   4742 				wm_reset_init_script_82575(sc);
   4743 		}
   4744 		break;
   4745 	default:
   4746 		break;
   4747 	}
   4748 
   4749 	if (phy_reset != 0)
   4750 		wm_phy_post_reset(sc);
   4751 
   4752 	if ((sc->sc_type == WM_T_82580)
   4753 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4754 		/* clear global device reset status bit */
   4755 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4756 	}
   4757 
   4758 	/* Clear any pending interrupt events. */
   4759 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4760 	reg = CSR_READ(sc, WMREG_ICR);
   4761 	if (wm_is_using_msix(sc)) {
   4762 		if (sc->sc_type != WM_T_82574) {
   4763 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4764 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4765 		} else
   4766 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4767 	}
   4768 
   4769 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4770 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4771 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4772 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4773 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4774 		reg |= KABGTXD_BGSQLBIAS;
   4775 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4776 	}
   4777 
   4778 	/* reload sc_ctrl */
   4779 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4780 
   4781 	if (sc->sc_type == WM_T_I354) {
   4782 #if 0
   4783 		/* I354 uses an external PHY */
   4784 		wm_set_eee_i354(sc);
   4785 #endif
   4786 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4787 		wm_set_eee_i350(sc);
   4788 
   4789 	/*
   4790 	 * For PCH, this write will make sure that any noise will be detected
   4791 	 * as a CRC error and be dropped rather than show up as a bad packet
    4792 	 * to the DMA engine.
   4793 	 */
   4794 	if (sc->sc_type == WM_T_PCH)
   4795 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4796 
   4797 	if (sc->sc_type >= WM_T_82544)
   4798 		CSR_WRITE(sc, WMREG_WUC, 0);
   4799 
   4800 	wm_reset_mdicnfg_82580(sc);
   4801 
   4802 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4803 		wm_pll_workaround_i210(sc);
   4804 
   4805 	if (sc->sc_type == WM_T_80003) {
   4806 		/* default to TRUE to enable the MDIC W/A */
   4807 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4808 
   4809 		rv = wm_kmrn_readreg(sc,
   4810 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4811 		if (rv == 0) {
   4812 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4813 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4814 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4815 			else
   4816 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4817 		}
   4818 	}
   4819 }
   4820 
   4821 /*
   4822  * wm_add_rxbuf:
   4823  *
    4824  *	Add a receive buffer to the indicated descriptor.
   4825  */
   4826 static int
   4827 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4828 {
   4829 	struct wm_softc *sc = rxq->rxq_sc;
   4830 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4831 	struct mbuf *m;
   4832 	int error;
   4833 
   4834 	KASSERT(mutex_owned(rxq->rxq_lock));
   4835 
   4836 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4837 	if (m == NULL)
   4838 		return ENOBUFS;
   4839 
   4840 	MCLGET(m, M_DONTWAIT);
   4841 	if ((m->m_flags & M_EXT) == 0) {
   4842 		m_freem(m);
   4843 		return ENOBUFS;
   4844 	}
   4845 
   4846 	if (rxs->rxs_mbuf != NULL)
   4847 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4848 
   4849 	rxs->rxs_mbuf = m;
   4850 
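	/*
	 * Map the whole cluster (m_ext.ext_size bytes, normally MCLBYTES)
	 * so that the chip can DMA a full-sized frame into it.
	 */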
   4851 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4852 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4853 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4854 	if (error) {
   4855 		/* XXX XXX XXX */
   4856 		aprint_error_dev(sc->sc_dev,
   4857 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4858 		panic("wm_add_rxbuf");
   4859 	}
   4860 
   4861 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4862 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4863 
   4864 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4865 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4866 			wm_init_rxdesc(rxq, idx);
   4867 	} else
   4868 		wm_init_rxdesc(rxq, idx);
   4869 
   4870 	return 0;
   4871 }
   4872 
   4873 /*
   4874  * wm_rxdrain:
   4875  *
   4876  *	Drain the receive queue.
   4877  */
   4878 static void
   4879 wm_rxdrain(struct wm_rxqueue *rxq)
   4880 {
   4881 	struct wm_softc *sc = rxq->rxq_sc;
   4882 	struct wm_rxsoft *rxs;
   4883 	int i;
   4884 
   4885 	KASSERT(mutex_owned(rxq->rxq_lock));
   4886 
   4887 	for (i = 0; i < WM_NRXDESC; i++) {
   4888 		rxs = &rxq->rxq_soft[i];
   4889 		if (rxs->rxs_mbuf != NULL) {
   4890 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4891 			m_freem(rxs->rxs_mbuf);
   4892 			rxs->rxs_mbuf = NULL;
   4893 		}
   4894 	}
   4895 }
   4896 
   4897 /*
    4898  * Set up the registers for RSS.
    4899  *
    4900  * XXX VMDq is not yet supported.
   4901  */
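/*
 * Illustration: the RETA maps (typically the low bits of) the RSS hash
 * to a queue index.  With qid = i % sc_nqueues below, e.g.
 * sc_nqueues == 4 fills the table with the repeating pattern
 * 0,1,2,3,0,1,... so that flows spread evenly across the queues.
 */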
   4902 static void
   4903 wm_init_rss(struct wm_softc *sc)
   4904 {
   4905 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4906 	int i;
   4907 
   4908 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   4909 
   4910 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4911 		int qid, reta_ent;
   4912 
   4913 		qid  = i % sc->sc_nqueues;
   4914 		switch (sc->sc_type) {
   4915 		case WM_T_82574:
   4916 			reta_ent = __SHIFTIN(qid,
   4917 			    RETA_ENT_QINDEX_MASK_82574);
   4918 			break;
   4919 		case WM_T_82575:
   4920 			reta_ent = __SHIFTIN(qid,
   4921 			    RETA_ENT_QINDEX1_MASK_82575);
   4922 			break;
   4923 		default:
   4924 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4925 			break;
   4926 		}
   4927 
   4928 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4929 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4930 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4931 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4932 	}
   4933 
   4934 	rss_getkey((uint8_t *)rss_key);
   4935 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4936 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4937 
   4938 	if (sc->sc_type == WM_T_82574)
   4939 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4940 	else
   4941 		mrqc = MRQC_ENABLE_RSS_MQ;
   4942 
   4943 	/*
   4944 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4945 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4946 	 */
   4947 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4948 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4949 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4950 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4951 
   4952 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4953 }
   4954 
   4955 /*
    4956  * Adjust the TX and RX queue numbers which the system actually uses.
    4957  *
    4958  * The numbers are affected by the following parameters:
    4959  *     - The number of hardware queues
   4960  *     - The number of MSI-X vectors (= "nvectors" argument)
   4961  *     - ncpu
   4962  */
   4963 static void
   4964 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4965 {
   4966 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4967 
   4968 	if (nvectors < 2) {
   4969 		sc->sc_nqueues = 1;
   4970 		return;
   4971 	}
   4972 
   4973 	switch (sc->sc_type) {
   4974 	case WM_T_82572:
   4975 		hw_ntxqueues = 2;
   4976 		hw_nrxqueues = 2;
   4977 		break;
   4978 	case WM_T_82574:
   4979 		hw_ntxqueues = 2;
   4980 		hw_nrxqueues = 2;
   4981 		break;
   4982 	case WM_T_82575:
   4983 		hw_ntxqueues = 4;
   4984 		hw_nrxqueues = 4;
   4985 		break;
   4986 	case WM_T_82576:
   4987 		hw_ntxqueues = 16;
   4988 		hw_nrxqueues = 16;
   4989 		break;
   4990 	case WM_T_82580:
   4991 	case WM_T_I350:
   4992 	case WM_T_I354:
   4993 		hw_ntxqueues = 8;
   4994 		hw_nrxqueues = 8;
   4995 		break;
   4996 	case WM_T_I210:
   4997 		hw_ntxqueues = 4;
   4998 		hw_nrxqueues = 4;
   4999 		break;
   5000 	case WM_T_I211:
   5001 		hw_ntxqueues = 2;
   5002 		hw_nrxqueues = 2;
   5003 		break;
   5004 		/*
    5005 		 * As the Ethernet controllers below do not support MSI-X,
    5006 		 * this driver does not use multiple queues on them.
   5007 		 *     - WM_T_80003
   5008 		 *     - WM_T_ICH8
   5009 		 *     - WM_T_ICH9
   5010 		 *     - WM_T_ICH10
   5011 		 *     - WM_T_PCH
   5012 		 *     - WM_T_PCH2
   5013 		 *     - WM_T_PCH_LPT
   5014 		 */
   5015 	default:
   5016 		hw_ntxqueues = 1;
   5017 		hw_nrxqueues = 1;
   5018 		break;
   5019 	}
   5020 
   5021 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5022 
   5023 	/*
    5024 	 * As using more queues than MSI-X vectors cannot improve scaling,
    5025 	 * we limit the number of queues actually used.
   5026 	 */
   5027 	if (nvectors < hw_nqueues + 1)
   5028 		sc->sc_nqueues = nvectors - 1;
   5029 	else
   5030 		sc->sc_nqueues = hw_nqueues;
   5031 
   5032 	/*
    5033 	 * As using more queues than CPUs cannot improve scaling, we limit
    5034 	 * the number of queues actually used.
   5035 	 */
   5036 	if (ncpu < sc->sc_nqueues)
   5037 		sc->sc_nqueues = ncpu;
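	/*
	 * Worked example (illustrative): an 82576 (16 hardware queues)
	 * given 5 MSI-X vectors on an 8 CPU machine yields
	 * sc_nqueues = min(5 - 1, 16, 8) = 4, as one vector is reserved
	 * for the link interrupt.
	 */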
   5038 }
   5039 
   5040 static inline bool
   5041 wm_is_using_msix(struct wm_softc *sc)
   5042 {
   5043 
   5044 	return (sc->sc_nintrs > 1);
   5045 }
   5046 
   5047 static inline bool
   5048 wm_is_using_multiqueue(struct wm_softc *sc)
   5049 {
   5050 
   5051 	return (sc->sc_nqueues > 1);
   5052 }
   5053 
   5054 static int
   5055 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5056 {
   5057 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5058 	wmq->wmq_id = qidx;
   5059 	wmq->wmq_intr_idx = intr_idx;
   5060 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5061 #ifdef WM_MPSAFE
   5062 	    | SOFTINT_MPSAFE
   5063 #endif
   5064 	    , wm_handle_queue, wmq);
   5065 	if (wmq->wmq_si != NULL)
   5066 		return 0;
   5067 
   5068 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5069 	    wmq->wmq_id);
   5070 
   5071 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5072 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5073 	return ENOMEM;
   5074 }
   5075 
   5076 /*
   5077  * Both single interrupt MSI and INTx can use this function.
   5078  */
   5079 static int
   5080 wm_setup_legacy(struct wm_softc *sc)
   5081 {
   5082 	pci_chipset_tag_t pc = sc->sc_pc;
   5083 	const char *intrstr = NULL;
   5084 	char intrbuf[PCI_INTRSTR_LEN];
   5085 	int error;
   5086 
   5087 	error = wm_alloc_txrx_queues(sc);
   5088 	if (error) {
   5089 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5090 		    error);
   5091 		return ENOMEM;
   5092 	}
   5093 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5094 	    sizeof(intrbuf));
   5095 #ifdef WM_MPSAFE
   5096 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5097 #endif
   5098 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5099 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5100 	if (sc->sc_ihs[0] == NULL) {
    5101 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   5102 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5103 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5104 		return ENOMEM;
   5105 	}
   5106 
   5107 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5108 	sc->sc_nintrs = 1;
   5109 
   5110 	return wm_softint_establish(sc, 0, 0);
   5111 }
   5112 
   5113 static int
   5114 wm_setup_msix(struct wm_softc *sc)
   5115 {
   5116 	void *vih;
   5117 	kcpuset_t *affinity;
   5118 	int qidx, error, intr_idx, txrx_established;
   5119 	pci_chipset_tag_t pc = sc->sc_pc;
   5120 	const char *intrstr = NULL;
   5121 	char intrbuf[PCI_INTRSTR_LEN];
   5122 	char intr_xname[INTRDEVNAMEBUF];
   5123 
   5124 	if (sc->sc_nqueues < ncpu) {
   5125 		/*
    5126 		 * To avoid colliding with other devices' interrupts, the
    5127 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
   5128 		 */
   5129 		sc->sc_affinity_offset = 1;
   5130 	} else {
   5131 		/*
    5132 		 * In this case, this device uses all CPUs, so we make the
    5133 		 * affinitized cpu_index match the MSI-X vector number for readability.
   5134 		 */
   5135 		sc->sc_affinity_offset = 0;
   5136 	}
   5137 
   5138 	error = wm_alloc_txrx_queues(sc);
   5139 	if (error) {
   5140 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5141 		    error);
   5142 		return ENOMEM;
   5143 	}
   5144 
   5145 	kcpuset_create(&affinity, false);
   5146 	intr_idx = 0;
   5147 
   5148 	/*
   5149 	 * TX and RX
   5150 	 */
   5151 	txrx_established = 0;
   5152 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5153 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5154 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5155 
   5156 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5157 		    sizeof(intrbuf));
   5158 #ifdef WM_MPSAFE
   5159 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5160 		    PCI_INTR_MPSAFE, true);
   5161 #endif
   5162 		memset(intr_xname, 0, sizeof(intr_xname));
   5163 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5164 		    device_xname(sc->sc_dev), qidx);
   5165 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5166 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5167 		if (vih == NULL) {
   5168 			aprint_error_dev(sc->sc_dev,
   5169 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5170 			    intrstr ? " at " : "",
   5171 			    intrstr ? intrstr : "");
   5172 
   5173 			goto fail;
   5174 		}
   5175 		kcpuset_zero(affinity);
   5176 		/* Round-robin affinity */
   5177 		kcpuset_set(affinity, affinity_to);
   5178 		error = interrupt_distribute(vih, affinity, NULL);
   5179 		if (error == 0) {
   5180 			aprint_normal_dev(sc->sc_dev,
   5181 			    "for TX and RX interrupting at %s affinity to %u\n",
   5182 			    intrstr, affinity_to);
   5183 		} else {
   5184 			aprint_normal_dev(sc->sc_dev,
   5185 			    "for TX and RX interrupting at %s\n", intrstr);
   5186 		}
   5187 		sc->sc_ihs[intr_idx] = vih;
   5188 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5189 			goto fail;
   5190 		txrx_established++;
   5191 		intr_idx++;
   5192 	}
   5193 
   5194 	/*
   5195 	 * LINK
   5196 	 */
   5197 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5198 	    sizeof(intrbuf));
   5199 #ifdef WM_MPSAFE
   5200 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5201 #endif
   5202 	memset(intr_xname, 0, sizeof(intr_xname));
   5203 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5204 	    device_xname(sc->sc_dev));
   5205 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5206 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5207 	if (vih == NULL) {
   5208 		aprint_error_dev(sc->sc_dev,
   5209 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5210 		    intrstr ? " at " : "",
   5211 		    intrstr ? intrstr : "");
   5212 
   5213 		goto fail;
   5214 	}
    5215 	/* Keep the default affinity for the LINK interrupt */
   5216 	aprint_normal_dev(sc->sc_dev,
   5217 	    "for LINK interrupting at %s\n", intrstr);
   5218 	sc->sc_ihs[intr_idx] = vih;
   5219 	sc->sc_link_intr_idx = intr_idx;
   5220 
   5221 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5222 	kcpuset_destroy(affinity);
   5223 	return 0;
   5224 
   5225  fail:
   5226 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5227 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    5228 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5229 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5230 	}
   5231 
   5232 	kcpuset_destroy(affinity);
   5233 	return ENOMEM;
   5234 }
   5235 
   5236 static void
   5237 wm_unset_stopping_flags(struct wm_softc *sc)
   5238 {
   5239 	int i;
   5240 
   5241 	KASSERT(WM_CORE_LOCKED(sc));
   5242 
   5243 	/*
    5244 	 * Must unset the stopping flags in ascending order.
   5245 	 */
   5246 	for (i = 0; i < sc->sc_nqueues; i++) {
   5247 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5248 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5249 
   5250 		mutex_enter(txq->txq_lock);
   5251 		txq->txq_stopping = false;
   5252 		mutex_exit(txq->txq_lock);
   5253 
   5254 		mutex_enter(rxq->rxq_lock);
   5255 		rxq->rxq_stopping = false;
   5256 		mutex_exit(rxq->rxq_lock);
   5257 	}
   5258 
   5259 	sc->sc_core_stopping = false;
   5260 }
   5261 
   5262 static void
   5263 wm_set_stopping_flags(struct wm_softc *sc)
   5264 {
   5265 	int i;
   5266 
   5267 	KASSERT(WM_CORE_LOCKED(sc));
   5268 
   5269 	sc->sc_core_stopping = true;
   5270 
   5271 	/*
    5272 	 * Must set the stopping flags in ascending order.
   5273 	 */
   5274 	for (i = 0; i < sc->sc_nqueues; i++) {
   5275 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5276 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5277 
   5278 		mutex_enter(rxq->rxq_lock);
   5279 		rxq->rxq_stopping = true;
   5280 		mutex_exit(rxq->rxq_lock);
   5281 
   5282 		mutex_enter(txq->txq_lock);
   5283 		txq->txq_stopping = true;
   5284 		mutex_exit(txq->txq_lock);
   5285 	}
   5286 }
   5287 
   5288 /*
    5289  * Write the interrupt interval value to the ITR or EITR register.
   5290  */
   5291 static void
   5292 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5293 {
   5294 
   5295 	if (!wmq->wmq_set_itr)
   5296 		return;
   5297 
   5298 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5299 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5300 
   5301 		/*
    5302 		 * The 82575 doesn't have the CNT_INGR field, so
    5303 		 * overwrite the counter field in software.
   5304 		 */
   5305 		if (sc->sc_type == WM_T_82575)
   5306 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5307 		else
   5308 			eitr |= EITR_CNT_INGR;
   5309 
   5310 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5311 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5312 		/*
    5313 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5314 		 * the multiqueue function with MSI-X.
   5315 		 */
   5316 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5317 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5318 	} else {
   5319 		KASSERT(wmq->wmq_id == 0);
   5320 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5321 	}
   5322 
   5323 	wmq->wmq_set_itr = false;
   5324 }
   5325 
   5326 /*
   5327  * TODO
    5328  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5329  * however, it does not fit wm(4), so AIM remains disabled until we
    5330  * find an appropriate ITR calculation.
   5331  */
   5332 /*
    5333  * Calculate the interrupt interval value to be written to the register
    5334  * by wm_itrs_writereg(). This function does not write the ITR/EITR register itself.
   5335  */
   5336 static void
   5337 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5338 {
   5339 #ifdef NOTYET
   5340 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5341 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5342 	uint32_t avg_size = 0;
   5343 	uint32_t new_itr;
   5344 
   5345 	if (rxq->rxq_packets)
   5346 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5347 	if (txq->txq_packets)
   5348 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5349 
   5350 	if (avg_size == 0) {
   5351 		new_itr = 450; /* restore default value */
   5352 		goto out;
   5353 	}
   5354 
   5355 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5356 	avg_size += 24;
   5357 
   5358 	/* Don't starve jumbo frames */
   5359 	avg_size = uimin(avg_size, 3000);
   5360 
   5361 	/* Give a little boost to mid-size frames */
   5362 	if ((avg_size > 300) && (avg_size < 1200))
   5363 		new_itr = avg_size / 3;
   5364 	else
   5365 		new_itr = avg_size / 2;
   5366 
   5367 out:
   5368 	/*
    5369 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
   5370 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5371 	 */
   5372 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5373 		new_itr *= 4;
   5374 
   5375 	if (new_itr != wmq->wmq_itr) {
   5376 		wmq->wmq_itr = new_itr;
   5377 		wmq->wmq_set_itr = true;
   5378 	} else
   5379 		wmq->wmq_set_itr = false;
   5380 
   5381 	rxq->rxq_packets = 0;
   5382 	rxq->rxq_bytes = 0;
   5383 	txq->txq_packets = 0;
   5384 	txq->txq_bytes = 0;
   5385 #endif
   5386 }
   5387 
   5388 /*
   5389  * wm_init:		[ifnet interface function]
   5390  *
   5391  *	Initialize the interface.
   5392  */
   5393 static int
   5394 wm_init(struct ifnet *ifp)
   5395 {
   5396 	struct wm_softc *sc = ifp->if_softc;
   5397 	int ret;
   5398 
   5399 	WM_CORE_LOCK(sc);
   5400 	ret = wm_init_locked(ifp);
   5401 	WM_CORE_UNLOCK(sc);
   5402 
   5403 	return ret;
   5404 }
   5405 
   5406 static int
   5407 wm_init_locked(struct ifnet *ifp)
   5408 {
   5409 	struct wm_softc *sc = ifp->if_softc;
   5410 	int i, j, trynum, error = 0;
   5411 	uint32_t reg;
   5412 
   5413 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5414 		device_xname(sc->sc_dev), __func__));
   5415 	KASSERT(WM_CORE_LOCKED(sc));
   5416 
   5417 	/*
    5418 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5419 	 * There is a small but measurable benefit to avoiding the adjustment
   5420 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5421 	 * on such platforms.  One possibility is that the DMA itself is
   5422 	 * slightly more efficient if the front of the entire packet (instead
   5423 	 * of the front of the headers) is aligned.
   5424 	 *
   5425 	 * Note we must always set align_tweak to 0 if we are using
   5426 	 * jumbo frames.
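	 * (With align_tweak = 2, the 14-byte Ethernet header is offset
	 * so that the IP header starts on a 4-byte boundary.)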
   5427 	 */
   5428 #ifdef __NO_STRICT_ALIGNMENT
   5429 	sc->sc_align_tweak = 0;
   5430 #else
   5431 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5432 		sc->sc_align_tweak = 0;
   5433 	else
   5434 		sc->sc_align_tweak = 2;
   5435 #endif /* __NO_STRICT_ALIGNMENT */
   5436 
   5437 	/* Cancel any pending I/O. */
   5438 	wm_stop_locked(ifp, 0);
   5439 
   5440 	/* update statistics before reset */
   5441 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5442 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5443 
   5444 	/* PCH_SPT hardware workaround */
   5445 	if (sc->sc_type == WM_T_PCH_SPT)
   5446 		wm_flush_desc_rings(sc);
   5447 
   5448 	/* Reset the chip to a known state. */
   5449 	wm_reset(sc);
   5450 
   5451 	/*
   5452 	 * AMT based hardware can now take control from firmware
   5453 	 * Do this after reset.
   5454 	 */
   5455 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5456 		wm_get_hw_control(sc);
   5457 
   5458 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5459 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5460 		wm_legacy_irq_quirk_spt(sc);
   5461 
   5462 	/* Init hardware bits */
   5463 	wm_initialize_hardware_bits(sc);
   5464 
   5465 	/* Reset the PHY. */
   5466 	if (sc->sc_flags & WM_F_HAS_MII)
   5467 		wm_gmii_reset(sc);
   5468 
   5469 	/* Calculate (E)ITR value */
   5470 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5471 		/*
    5472 		 * For NEWQUEUE's EITR (except for the 82575).
    5473 		 * The 82575's EITR should be set to the same throttling value as
    5474 		 * other old controllers' ITR because the interrupts/sec
    5475 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5476 		 *
    5477 		 * The 82574's EITR should be set to the same throttling value as ITR.
    5478 		 *
    5479 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
    5480 		 * contrast to the ITR throttling value.
   5481 		 */
   5482 		sc->sc_itr_init = 450;
   5483 	} else if (sc->sc_type >= WM_T_82543) {
   5484 		/*
    5485 		 * Set up the interrupt throttling register (units of 256ns).
    5486 		 * Note that a footnote in Intel's documentation says this
    5487 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    5488 		 * or 10Mbit mode.  Empirically, it appears that this is also
    5489 		 * true for the 1024ns units of the other interrupt-related
    5490 		 * timer registers -- so, really, we ought to divide this
    5491 		 * value by 4 when the link speed is low.
   5492 		 *
   5493 		 * XXX implement this division at link speed change!
   5494 		 */
   5495 
   5496 		/*
   5497 		 * For N interrupts/sec, set this value to:
   5498 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5499 		 * absolute and packet timer values to this value
   5500 		 * divided by 4 to get "simple timer" behavior.
   5501 		 */
   5502 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5503 	}
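	/*
	 * Worked example (illustrative): the legacy ITR value 1500 in
	 * 256ns units gives 1,000,000,000 / (1500 * 256) =~ 2604
	 * interrupts/sec, while the NEWQUEUE EITR value 450 gives
	 * 1,000,000 / 450 =~ 2222 interrupts/sec.
	 */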
   5504 
   5505 	error = wm_init_txrx_queues(sc);
   5506 	if (error)
   5507 		goto out;
   5508 
   5509 	/*
   5510 	 * Clear out the VLAN table -- we don't use it (yet).
   5511 	 */
   5512 	CSR_WRITE(sc, WMREG_VET, 0);
   5513 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5514 		trynum = 10; /* Due to hw errata */
   5515 	else
   5516 		trynum = 1;
   5517 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5518 		for (j = 0; j < trynum; j++)
   5519 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5520 
   5521 	/*
   5522 	 * Set up flow-control parameters.
   5523 	 *
   5524 	 * XXX Values could probably stand some tuning.
   5525 	 */
   5526 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5527 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5528 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5529 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5530 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5531 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5532 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5533 	}
   5534 
   5535 	sc->sc_fcrtl = FCRTL_DFLT;
   5536 	if (sc->sc_type < WM_T_82543) {
   5537 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5538 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5539 	} else {
   5540 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5541 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5542 	}
   5543 
   5544 	if (sc->sc_type == WM_T_80003)
   5545 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5546 	else
   5547 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5548 
   5549 	/* Writes the control register. */
   5550 	wm_set_vlan(sc);
   5551 
   5552 	if (sc->sc_flags & WM_F_HAS_MII) {
   5553 		uint16_t kmreg;
   5554 
   5555 		switch (sc->sc_type) {
   5556 		case WM_T_80003:
   5557 		case WM_T_ICH8:
   5558 		case WM_T_ICH9:
   5559 		case WM_T_ICH10:
   5560 		case WM_T_PCH:
   5561 		case WM_T_PCH2:
   5562 		case WM_T_PCH_LPT:
   5563 		case WM_T_PCH_SPT:
   5564 		case WM_T_PCH_CNP:
   5565 			/*
   5566 			 * Set the mac to wait the maximum time between each
   5567 			 * iteration and increase the max iterations when
   5568 			 * polling the phy; this fixes erroneous timeouts at
   5569 			 * 10Mbps.
   5570 			 */
   5571 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5572 			    0xFFFF);
   5573 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5574 			    &kmreg);
   5575 			kmreg |= 0x3F;
   5576 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5577 			    kmreg);
   5578 			break;
   5579 		default:
   5580 			break;
   5581 		}
   5582 
   5583 		if (sc->sc_type == WM_T_80003) {
   5584 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5585 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5586 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5587 
   5588 			/* Bypass RX and TX FIFO's */
   5589 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5590 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5591 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5592 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5593 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5594 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5595 		}
   5596 	}
   5597 #if 0
   5598 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5599 #endif
   5600 
   5601 	/* Set up checksum offload parameters. */
   5602 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5603 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5604 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5605 		reg |= RXCSUM_IPOFL;
   5606 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5607 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5608 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5609 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5610 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5611 
   5612 	/* Set registers about MSI-X */
   5613 	if (wm_is_using_msix(sc)) {
   5614 		uint32_t ivar;
   5615 		struct wm_queue *wmq;
   5616 		int qid, qintr_idx;
   5617 
   5618 		if (sc->sc_type == WM_T_82575) {
   5619 			/* Interrupt control */
   5620 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5621 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5622 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5623 
   5624 			/* TX and RX */
   5625 			for (i = 0; i < sc->sc_nqueues; i++) {
   5626 				wmq = &sc->sc_queue[i];
   5627 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5628 				    EITR_TX_QUEUE(wmq->wmq_id)
   5629 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5630 			}
   5631 			/* Link status */
   5632 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5633 			    EITR_OTHER);
   5634 		} else if (sc->sc_type == WM_T_82574) {
   5635 			/* Interrupt control */
   5636 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5637 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5638 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5639 
   5640 			/*
    5641 			 * Work around an issue with spurious interrupts
    5642 			 * in MSI-X mode.
    5643 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5644 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5645 			 */
   5646 			reg = CSR_READ(sc, WMREG_RFCTL);
   5647 			reg |= WMREG_RFCTL_ACKDIS;
   5648 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5649 
   5650 			ivar = 0;
   5651 			/* TX and RX */
   5652 			for (i = 0; i < sc->sc_nqueues; i++) {
   5653 				wmq = &sc->sc_queue[i];
   5654 				qid = wmq->wmq_id;
   5655 				qintr_idx = wmq->wmq_intr_idx;
   5656 
   5657 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5658 				    IVAR_TX_MASK_Q_82574(qid));
   5659 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5660 				    IVAR_RX_MASK_Q_82574(qid));
   5661 			}
   5662 			/* Link status */
   5663 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5664 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5665 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5666 		} else {
   5667 			/* Interrupt control */
   5668 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5669 			    | GPIE_EIAME | GPIE_PBA);
   5670 
   5671 			switch (sc->sc_type) {
   5672 			case WM_T_82580:
   5673 			case WM_T_I350:
   5674 			case WM_T_I354:
   5675 			case WM_T_I210:
   5676 			case WM_T_I211:
   5677 				/* TX and RX */
   5678 				for (i = 0; i < sc->sc_nqueues; i++) {
   5679 					wmq = &sc->sc_queue[i];
   5680 					qid = wmq->wmq_id;
   5681 					qintr_idx = wmq->wmq_intr_idx;
   5682 
   5683 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5684 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5685 					ivar |= __SHIFTIN((qintr_idx
   5686 						| IVAR_VALID),
   5687 					    IVAR_TX_MASK_Q(qid));
   5688 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5689 					ivar |= __SHIFTIN((qintr_idx
   5690 						| IVAR_VALID),
   5691 					    IVAR_RX_MASK_Q(qid));
   5692 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5693 				}
   5694 				break;
   5695 			case WM_T_82576:
   5696 				/* TX and RX */
   5697 				for (i = 0; i < sc->sc_nqueues; i++) {
   5698 					wmq = &sc->sc_queue[i];
   5699 					qid = wmq->wmq_id;
   5700 					qintr_idx = wmq->wmq_intr_idx;
   5701 
   5702 					ivar = CSR_READ(sc,
   5703 					    WMREG_IVAR_Q_82576(qid));
   5704 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5705 					ivar |= __SHIFTIN((qintr_idx
   5706 						| IVAR_VALID),
   5707 					    IVAR_TX_MASK_Q_82576(qid));
   5708 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5709 					ivar |= __SHIFTIN((qintr_idx
   5710 						| IVAR_VALID),
   5711 					    IVAR_RX_MASK_Q_82576(qid));
   5712 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5713 					    ivar);
   5714 				}
   5715 				break;
   5716 			default:
   5717 				break;
   5718 			}
   5719 
   5720 			/* Link status */
   5721 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5722 			    IVAR_MISC_OTHER);
   5723 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5724 		}
   5725 
   5726 		if (wm_is_using_multiqueue(sc)) {
   5727 			wm_init_rss(sc);
   5728 
   5729 			/*
    5730 			 * NOTE: Receive Full-Packet Checksum Offload is
    5731 			 * mutually exclusive with Multiqueue. However, this
    5732 			 * is not the same as the TCP/IP checksums, which
    5733 			 * still work.
    5734 			 */
   5735 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5736 			reg |= RXCSUM_PCSD;
   5737 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5738 		}
   5739 	}
   5740 
   5741 	/* Set up the interrupt registers. */
   5742 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5743 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5744 	    ICR_RXO | ICR_RXT0;
   5745 	if (wm_is_using_msix(sc)) {
   5746 		uint32_t mask;
   5747 		struct wm_queue *wmq;
   5748 
   5749 		switch (sc->sc_type) {
   5750 		case WM_T_82574:
   5751 			mask = 0;
   5752 			for (i = 0; i < sc->sc_nqueues; i++) {
   5753 				wmq = &sc->sc_queue[i];
   5754 				mask |= ICR_TXQ(wmq->wmq_id);
   5755 				mask |= ICR_RXQ(wmq->wmq_id);
   5756 			}
   5757 			mask |= ICR_OTHER;
   5758 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5759 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5760 			break;
   5761 		default:
   5762 			if (sc->sc_type == WM_T_82575) {
   5763 				mask = 0;
   5764 				for (i = 0; i < sc->sc_nqueues; i++) {
   5765 					wmq = &sc->sc_queue[i];
   5766 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5767 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5768 				}
   5769 				mask |= EITR_OTHER;
   5770 			} else {
   5771 				mask = 0;
   5772 				for (i = 0; i < sc->sc_nqueues; i++) {
   5773 					wmq = &sc->sc_queue[i];
   5774 					mask |= 1 << wmq->wmq_intr_idx;
   5775 				}
   5776 				mask |= 1 << sc->sc_link_intr_idx;
   5777 			}
   5778 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5779 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5780 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5781 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5782 			break;
   5783 		}
   5784 	} else
   5785 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5786 
   5787 	/* Set up the inter-packet gap. */
   5788 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5789 
   5790 	if (sc->sc_type >= WM_T_82543) {
   5791 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5792 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5793 			wm_itrs_writereg(sc, wmq);
   5794 		}
   5795 		/*
    5796 		 * Link interrupts occur much less often than TX
    5797 		 * interrupts and RX interrupts, so we don't tune the
    5798 		 * EITR(WM_MSIX_LINKINTR_IDX) value like
    5799 		 * FreeBSD's if_igb.
   5800 		 */
   5801 	}
   5802 
   5803 	/* Set the VLAN ethernetype. */
   5804 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5805 
   5806 	/*
   5807 	 * Set up the transmit control register; we start out with
    5808 	 * a collision distance suitable for FDX, but update it when
   5809 	 * we resolve the media type.
   5810 	 */
   5811 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5812 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5813 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5814 	if (sc->sc_type >= WM_T_82571)
   5815 		sc->sc_tctl |= TCTL_MULR;
   5816 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5817 
   5818 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5819 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5820 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5821 	}
   5822 
   5823 	if (sc->sc_type == WM_T_80003) {
   5824 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5825 		reg &= ~TCTL_EXT_GCEX_MASK;
   5826 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5827 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5828 	}
   5829 
   5830 	/* Set the media. */
   5831 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5832 		goto out;
   5833 
   5834 	/* Configure for OS presence */
   5835 	wm_init_manageability(sc);
   5836 
   5837 	/*
   5838 	 * Set up the receive control register; we actually program the
   5839 	 * register when we set the receive filter. Use multicast address
   5840 	 * offset type 0.
   5841 	 *
   5842 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   5843 	 * don't enable that feature.
   5844 	 */
   5845 	sc->sc_mchash_type = 0;
   5846 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5847 	    | RCTL_MO(sc->sc_mchash_type);
   5848 
   5849 	/*
    5850 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5851 	 */
   5852 	if (sc->sc_type == WM_T_82574)
   5853 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5854 
   5855 	/*
   5856 	 * The I350 has a bug where it always strips the CRC whether
    5857 	 * asked to or not. So ask for the stripped CRC here and cope in rxeof.
   5858 	 */
   5859 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5860 	    || (sc->sc_type == WM_T_I210))
   5861 		sc->sc_rctl |= RCTL_SECRC;
   5862 
   5863 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5864 	    && (ifp->if_mtu > ETHERMTU)) {
   5865 		sc->sc_rctl |= RCTL_LPE;
   5866 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5867 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5868 	}
   5869 
   5870 	if (MCLBYTES == 2048) {
   5871 		sc->sc_rctl |= RCTL_2k;
   5872 	} else {
   5873 		if (sc->sc_type >= WM_T_82543) {
   5874 			switch (MCLBYTES) {
   5875 			case 4096:
   5876 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5877 				break;
   5878 			case 8192:
   5879 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5880 				break;
   5881 			case 16384:
   5882 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5883 				break;
   5884 			default:
   5885 				panic("wm_init: MCLBYTES %d unsupported",
   5886 				    MCLBYTES);
   5887 				break;
   5888 			}
   5889 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5890 	}
   5891 
   5892 	/* Enable ECC */
   5893 	switch (sc->sc_type) {
   5894 	case WM_T_82571:
   5895 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5896 		reg |= PBA_ECC_CORR_EN;
   5897 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5898 		break;
   5899 	case WM_T_PCH_LPT:
   5900 	case WM_T_PCH_SPT:
   5901 	case WM_T_PCH_CNP:
   5902 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5903 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5904 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5905 
   5906 		sc->sc_ctrl |= CTRL_MEHE;
   5907 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5908 		break;
   5909 	default:
   5910 		break;
   5911 	}
   5912 
   5913 	/*
   5914 	 * Set the receive filter.
   5915 	 *
   5916 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5917 	 * the setting of RCTL.EN in wm_set_filter()
   5918 	 */
   5919 	wm_set_filter(sc);
   5920 
   5921 	/* On 575 and later set RDT only if RX enabled */
   5922 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5923 		int qidx;
   5924 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5925 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5926 			for (i = 0; i < WM_NRXDESC; i++) {
   5927 				mutex_enter(rxq->rxq_lock);
   5928 				wm_init_rxdesc(rxq, i);
   5929 				mutex_exit(rxq->rxq_lock);
   5930 
   5931 			}
   5932 		}
   5933 	}
   5934 
   5935 	wm_unset_stopping_flags(sc);
   5936 
   5937 	/* Start the one second link check clock. */
   5938 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5939 
   5940 	/* ...all done! */
   5941 	ifp->if_flags |= IFF_RUNNING;
   5942 	ifp->if_flags &= ~IFF_OACTIVE;
   5943 
   5944  out:
   5945 	sc->sc_if_flags = ifp->if_flags;
   5946 	if (error)
   5947 		log(LOG_ERR, "%s: interface not running\n",
   5948 		    device_xname(sc->sc_dev));
   5949 	return error;
   5950 }
   5951 
   5952 /*
   5953  * wm_stop:		[ifnet interface function]
   5954  *
   5955  *	Stop transmission on the interface.
   5956  */
   5957 static void
   5958 wm_stop(struct ifnet *ifp, int disable)
   5959 {
   5960 	struct wm_softc *sc = ifp->if_softc;
   5961 
   5962 	WM_CORE_LOCK(sc);
   5963 	wm_stop_locked(ifp, disable);
   5964 	WM_CORE_UNLOCK(sc);
   5965 }
   5966 
   5967 static void
   5968 wm_stop_locked(struct ifnet *ifp, int disable)
   5969 {
   5970 	struct wm_softc *sc = ifp->if_softc;
   5971 	struct wm_txsoft *txs;
   5972 	int i, qidx;
   5973 
   5974 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5975 		device_xname(sc->sc_dev), __func__));
   5976 	KASSERT(WM_CORE_LOCKED(sc));
   5977 
   5978 	wm_set_stopping_flags(sc);
   5979 
   5980 	/* Stop the one second clock. */
   5981 	callout_stop(&sc->sc_tick_ch);
   5982 
   5983 	/* Stop the 82547 Tx FIFO stall check timer. */
   5984 	if (sc->sc_type == WM_T_82547)
   5985 		callout_stop(&sc->sc_txfifo_ch);
   5986 
   5987 	if (sc->sc_flags & WM_F_HAS_MII) {
   5988 		/* Down the MII. */
   5989 		mii_down(&sc->sc_mii);
   5990 	} else {
   5991 #if 0
   5992 		/* Should we clear PHY's status properly? */
   5993 		wm_reset(sc);
   5994 #endif
   5995 	}
   5996 
   5997 	/* Stop the transmit and receive processes. */
   5998 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5999 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6000 	sc->sc_rctl &= ~RCTL_EN;
   6001 
   6002 	/*
   6003 	 * Clear the interrupt mask to ensure the device cannot assert its
   6004 	 * interrupt line.
   6005 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6006 	 * service any currently pending or shared interrupt.
   6007 	 */
   6008 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6009 	sc->sc_icr = 0;
   6010 	if (wm_is_using_msix(sc)) {
   6011 		if (sc->sc_type != WM_T_82574) {
   6012 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6013 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6014 		} else
   6015 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6016 	}
   6017 
   6018 	/* Release any queued transmit buffers. */
   6019 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6020 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6021 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6022 		mutex_enter(txq->txq_lock);
   6023 		txq->txq_sending = false; /* ensure watchdog disabled */
   6024 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6025 			txs = &txq->txq_soft[i];
   6026 			if (txs->txs_mbuf != NULL) {
    6027 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6028 				m_freem(txs->txs_mbuf);
   6029 				txs->txs_mbuf = NULL;
   6030 			}
   6031 		}
   6032 		mutex_exit(txq->txq_lock);
   6033 	}
   6034 
   6035 	/* Mark the interface as down and cancel the watchdog timer. */
   6036 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6037 
   6038 	if (disable) {
   6039 		for (i = 0; i < sc->sc_nqueues; i++) {
   6040 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6041 			mutex_enter(rxq->rxq_lock);
   6042 			wm_rxdrain(rxq);
   6043 			mutex_exit(rxq->rxq_lock);
   6044 		}
   6045 	}
   6046 
   6047 #if 0 /* notyet */
   6048 	if (sc->sc_type >= WM_T_82544)
   6049 		CSR_WRITE(sc, WMREG_WUC, 0);
   6050 #endif
   6051 }
   6052 
   6053 static void
   6054 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6055 {
   6056 	struct mbuf *m;
   6057 	int i;
   6058 
   6059 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6060 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6061 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6062 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6063 		    m->m_data, m->m_len, m->m_flags);
   6064 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6065 	    i, i == 1 ? "" : "s");
   6066 }
   6067 
   6068 /*
   6069  * wm_82547_txfifo_stall:
   6070  *
   6071  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6072  *	reset the FIFO pointers, and restart packet transmission.
   6073  */
   6074 static void
   6075 wm_82547_txfifo_stall(void *arg)
   6076 {
   6077 	struct wm_softc *sc = arg;
   6078 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6079 
   6080 	mutex_enter(txq->txq_lock);
   6081 
   6082 	if (txq->txq_stopping)
   6083 		goto out;
   6084 
   6085 	if (txq->txq_fifo_stall) {
   6086 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6087 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6088 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6089 			/*
   6090 			 * Packets have drained.  Stop transmitter, reset
   6091 			 * FIFO pointers, restart transmitter, and kick
   6092 			 * the packet queue.
   6093 			 */
   6094 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6095 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6096 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6097 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6098 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6099 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6100 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6101 			CSR_WRITE_FLUSH(sc);
   6102 
   6103 			txq->txq_fifo_head = 0;
   6104 			txq->txq_fifo_stall = 0;
   6105 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6106 		} else {
   6107 			/*
   6108 			 * Still waiting for packets to drain; try again in
   6109 			 * another tick.
   6110 			 */
   6111 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6112 		}
   6113 	}
   6114 
   6115 out:
   6116 	mutex_exit(txq->txq_lock);
   6117 }
   6118 
   6119 /*
   6120  * wm_82547_txfifo_bugchk:
   6121  *
    6122  *	Check for a bug condition in the 82547 Tx FIFO.  We need to
    6123  *	prevent enqueueing a packet that would wrap around the end
    6124  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6125  *
   6126  *	We do this by checking the amount of space before the end
   6127  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6128  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6129  *	the internal FIFO pointers to the beginning, and restart
   6130  *	transmission on the interface.
   6131  */
   6132 #define	WM_FIFO_HDR		0x10
   6133 #define	WM_82547_PAD_LEN	0x3e0
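/*
 * Illustrative numbers, assuming a 10KB Tx FIFO (txq_fifo_size = 10240):
 * with txq_fifo_head = 9800 there are 440 bytes left before the wrap.
 * A 1514-byte frame rounds up to len = 1536, and since 1536 >=
 * WM_82547_PAD_LEN (992) + 440, the FIFO is stalled until it drains;
 * with txq_fifo_head = 9000 (space = 1240) the same frame is sent
 * immediately.
 */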
   6134 static int
   6135 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6136 {
   6137 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6138 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6139 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6140 
   6141 	/* Just return if already stalled. */
   6142 	if (txq->txq_fifo_stall)
   6143 		return 1;
   6144 
   6145 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6146 		/* Stall only occurs in half-duplex mode. */
   6147 		goto send_packet;
   6148 	}
   6149 
   6150 	if (len >= WM_82547_PAD_LEN + space) {
   6151 		txq->txq_fifo_stall = 1;
   6152 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6153 		return 1;
   6154 	}
   6155 
   6156  send_packet:
   6157 	txq->txq_fifo_head += len;
   6158 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6159 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6160 
   6161 	return 0;
   6162 }
   6163 
   6164 static int
   6165 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6166 {
   6167 	int error;
   6168 
   6169 	/*
   6170 	 * Allocate the control data structures, and create and load the
   6171 	 * DMA map for it.
   6172 	 *
   6173 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6174 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6175 	 * both sets within the same 4G segment.
   6176 	 */
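	/*
	 * The 4G constraint is enforced by passing 0x100000000ULL as the
	 * boundary argument to bus_dmamem_alloc() below, which guarantees
	 * that the allocation never crosses a 4GB boundary.
	 */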
   6177 	if (sc->sc_type < WM_T_82544)
   6178 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6179 	else
   6180 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6181 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6182 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6183 	else
   6184 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6185 
   6186 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6187 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6188 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6189 		aprint_error_dev(sc->sc_dev,
   6190 		    "unable to allocate TX control data, error = %d\n",
   6191 		    error);
   6192 		goto fail_0;
   6193 	}
   6194 
   6195 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6196 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6197 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6198 		aprint_error_dev(sc->sc_dev,
   6199 		    "unable to map TX control data, error = %d\n", error);
   6200 		goto fail_1;
   6201 	}
   6202 
   6203 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6204 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6205 		aprint_error_dev(sc->sc_dev,
   6206 		    "unable to create TX control data DMA map, error = %d\n",
   6207 		    error);
   6208 		goto fail_2;
   6209 	}
   6210 
   6211 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6212 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6213 		aprint_error_dev(sc->sc_dev,
   6214 		    "unable to load TX control data DMA map, error = %d\n",
   6215 		    error);
   6216 		goto fail_3;
   6217 	}
   6218 
   6219 	return 0;
   6220 
   6221  fail_3:
   6222 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6223  fail_2:
   6224 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6225 	    WM_TXDESCS_SIZE(txq));
   6226  fail_1:
   6227 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6228  fail_0:
   6229 	return error;
   6230 }
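
         /*
          * The alloc/map/create/load ladder above is the standard bus_dma(9)
          * pattern; a condensed sketch (error handling elided, arguments as
          * in the code above):
          *
          *	bus_dmamem_alloc(tag, size, PAGE_SIZE, 0x100000000ULL,
          *	    &seg, 1, &rseg, 0);
          *	bus_dmamem_map(tag, &seg, rseg, size, &kva, BUS_DMA_COHERENT);
          *	bus_dmamap_create(tag, size, 1, size, 0, 0, &map);
          *	bus_dmamap_load(tag, map, kva, size, NULL, 0);
          *
          * The 4G boundary argument to bus_dmamem_alloc() is what keeps the
          * ring inside a single 4G segment, as the NOTE above requires, and
          * each fail_* label unwinds exactly the steps that succeeded.
          */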
   6231 
   6232 static void
   6233 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6234 {
   6235 
   6236 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6237 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6238 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6239 	    WM_TXDESCS_SIZE(txq));
   6240 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6241 }
   6242 
   6243 static int
   6244 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6245 {
   6246 	int error;
   6247 	size_t rxq_descs_size;
   6248 
   6249 	/*
   6250 	 * Allocate the control data structures, and create and load the
   6251 	 * DMA map for it.
   6252 	 *
   6253 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6254 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6255 	 * both sets within the same 4G segment.
   6256 	 */
   6257 	rxq->rxq_ndesc = WM_NRXDESC;
   6258 	if (sc->sc_type == WM_T_82574)
   6259 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6260 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6261 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6262 	else
   6263 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6264 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6265 
   6266 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6267 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6268 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6269 		aprint_error_dev(sc->sc_dev,
   6270 		    "unable to allocate RX control data, error = %d\n",
   6271 		    error);
   6272 		goto fail_0;
   6273 	}
   6274 
   6275 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6276 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6277 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6278 		aprint_error_dev(sc->sc_dev,
   6279 		    "unable to map RX control data, error = %d\n", error);
   6280 		goto fail_1;
   6281 	}
   6282 
   6283 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6284 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6285 		aprint_error_dev(sc->sc_dev,
   6286 		    "unable to create RX control data DMA map, error = %d\n",
   6287 		    error);
   6288 		goto fail_2;
   6289 	}
   6290 
   6291 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6292 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6293 		aprint_error_dev(sc->sc_dev,
   6294 		    "unable to load RX control data DMA map, error = %d\n",
   6295 		    error);
   6296 		goto fail_3;
   6297 	}
   6298 
   6299 	return 0;
   6300 
   6301  fail_3:
   6302 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6303  fail_2:
   6304 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6305 	    rxq_descs_size);
   6306  fail_1:
   6307 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6308  fail_0:
   6309 	return error;
   6310 }
   6311 
   6312 static void
   6313 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6314 {
   6315 
   6316 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6317 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6318 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6319 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6320 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6321 }
   6322 
   6323 
   6324 static int
   6325 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6326 {
   6327 	int i, error;
   6328 
   6329 	/* Create the transmit buffer DMA maps. */
   6330 	WM_TXQUEUELEN(txq) =
   6331 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6332 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6333 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6334 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6335 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6336 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6337 			aprint_error_dev(sc->sc_dev,
   6338 			    "unable to create Tx DMA map %d, error = %d\n",
   6339 			    i, error);
   6340 			goto fail;
   6341 		}
   6342 	}
   6343 
   6344 	return 0;
   6345 
   6346  fail:
   6347 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6348 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6349 			bus_dmamap_destroy(sc->sc_dmat,
   6350 			    txq->txq_soft[i].txs_dmamap);
   6351 	}
   6352 	return error;
   6353 }
   6354 
   6355 static void
   6356 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6357 {
   6358 	int i;
   6359 
   6360 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6361 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6362 			bus_dmamap_destroy(sc->sc_dmat,
   6363 			    txq->txq_soft[i].txs_dmamap);
   6364 	}
   6365 }
   6366 
   6367 static int
   6368 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6369 {
   6370 	int i, error;
   6371 
   6372 	/* Create the receive buffer DMA maps. */
   6373 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6374 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6375 			    MCLBYTES, 0, 0,
   6376 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6377 			aprint_error_dev(sc->sc_dev,
   6378 			    "unable to create Rx DMA map %d error = %d\n",
   6379 			    i, error);
   6380 			goto fail;
   6381 		}
   6382 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6383 	}
   6384 
   6385 	return 0;
   6386 
   6387  fail:
   6388 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6389 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6390 			bus_dmamap_destroy(sc->sc_dmat,
   6391 			    rxq->rxq_soft[i].rxs_dmamap);
   6392 	}
   6393 	return error;
   6394 }
   6395 
   6396 static void
   6397 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6398 {
   6399 	int i;
   6400 
   6401 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6402 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6403 			bus_dmamap_destroy(sc->sc_dmat,
   6404 			    rxq->rxq_soft[i].rxs_dmamap);
   6405 	}
   6406 }
   6407 
   6408 /*
    6409  * wm_alloc_txrx_queues:
    6410  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6411  */
   6412 static int
   6413 wm_alloc_txrx_queues(struct wm_softc *sc)
   6414 {
   6415 	int i, error, tx_done, rx_done;
   6416 
   6417 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6418 	    KM_SLEEP);
   6419 	if (sc->sc_queue == NULL) {
    6420 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6421 		error = ENOMEM;
   6422 		goto fail_0;
   6423 	}
   6424 
   6425 	/*
   6426 	 * For transmission
   6427 	 */
   6428 	error = 0;
   6429 	tx_done = 0;
   6430 	for (i = 0; i < sc->sc_nqueues; i++) {
   6431 #ifdef WM_EVENT_COUNTERS
   6432 		int j;
   6433 		const char *xname;
   6434 #endif
   6435 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6436 		txq->txq_sc = sc;
   6437 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6438 
   6439 		error = wm_alloc_tx_descs(sc, txq);
   6440 		if (error)
   6441 			break;
   6442 		error = wm_alloc_tx_buffer(sc, txq);
   6443 		if (error) {
   6444 			wm_free_tx_descs(sc, txq);
   6445 			break;
   6446 		}
   6447 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6448 		if (txq->txq_interq == NULL) {
   6449 			wm_free_tx_descs(sc, txq);
   6450 			wm_free_tx_buffer(sc, txq);
   6451 			error = ENOMEM;
   6452 			break;
   6453 		}
   6454 
   6455 #ifdef WM_EVENT_COUNTERS
   6456 		xname = device_xname(sc->sc_dev);
   6457 
   6458 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6459 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6460 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6461 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6462 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6463 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6464 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6465 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6466 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6467 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6468 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6469 
   6470 		for (j = 0; j < WM_NTXSEGS; j++) {
   6471 			snprintf(txq->txq_txseg_evcnt_names[j],
    6472 			    sizeof(txq->txq_txseg_evcnt_names[j]),
         			    "txq%02dtxseg%d", i, j);
    6473 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
         			    EVCNT_TYPE_MISC, NULL, xname,
    6474 			    txq->txq_txseg_evcnt_names[j]);
   6475 		}
   6476 
   6477 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6478 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6479 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6480 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6481 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6482 #endif /* WM_EVENT_COUNTERS */
   6483 
   6484 		tx_done++;
   6485 	}
   6486 	if (error)
   6487 		goto fail_1;
   6488 
   6489 	/*
    6490 	 * For receive
   6491 	 */
   6492 	error = 0;
   6493 	rx_done = 0;
   6494 	for (i = 0; i < sc->sc_nqueues; i++) {
   6495 #ifdef WM_EVENT_COUNTERS
   6496 		const char *xname;
   6497 #endif
   6498 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6499 		rxq->rxq_sc = sc;
   6500 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6501 
   6502 		error = wm_alloc_rx_descs(sc, rxq);
   6503 		if (error)
   6504 			break;
   6505 
   6506 		error = wm_alloc_rx_buffer(sc, rxq);
   6507 		if (error) {
   6508 			wm_free_rx_descs(sc, rxq);
   6509 			break;
   6510 		}
   6511 
   6512 #ifdef WM_EVENT_COUNTERS
   6513 		xname = device_xname(sc->sc_dev);
   6514 
   6515 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6516 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6517 
   6518 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6519 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6520 #endif /* WM_EVENT_COUNTERS */
   6521 
   6522 		rx_done++;
   6523 	}
   6524 	if (error)
   6525 		goto fail_2;
   6526 
   6527 	return 0;
   6528 
   6529  fail_2:
   6530 	for (i = 0; i < rx_done; i++) {
   6531 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6532 		wm_free_rx_buffer(sc, rxq);
   6533 		wm_free_rx_descs(sc, rxq);
   6534 		if (rxq->rxq_lock)
   6535 			mutex_obj_free(rxq->rxq_lock);
   6536 	}
   6537  fail_1:
   6538 	for (i = 0; i < tx_done; i++) {
   6539 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6540 		pcq_destroy(txq->txq_interq);
   6541 		wm_free_tx_buffer(sc, txq);
   6542 		wm_free_tx_descs(sc, txq);
   6543 		if (txq->txq_lock)
   6544 			mutex_obj_free(txq->txq_lock);
   6545 	}
   6546 
   6547 	kmem_free(sc->sc_queue,
   6548 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6549  fail_0:
   6550 	return error;
   6551 }
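
         /*
          * Note the unwind pattern above: tx_done and rx_done count the
          * queues that were set up completely, so the failure paths free
          * only those.  A minimal sketch of the idiom, with hypothetical
          * setup()/teardown() helpers standing in for the wm_alloc_*()
          * and wm_free_*() pairs:
          *
          *	for (i = 0; i < n; i++) {
          *		if ((error = setup(i)) != 0)
          *			break;
          *		done++;
          *	}
          *	if (error) {
          *		for (i = 0; i < done; i++)
          *			teardown(i);
          *	}
          */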
   6552 
   6553 /*
    6554  * wm_free_txrx_queues:
    6555  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6556  */
   6557 static void
   6558 wm_free_txrx_queues(struct wm_softc *sc)
   6559 {
   6560 	int i;
   6561 
   6562 	for (i = 0; i < sc->sc_nqueues; i++) {
   6563 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6564 
   6565 #ifdef WM_EVENT_COUNTERS
   6566 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6567 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6568 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6569 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6570 #endif /* WM_EVENT_COUNTERS */
   6571 
   6572 		wm_free_rx_buffer(sc, rxq);
   6573 		wm_free_rx_descs(sc, rxq);
   6574 		if (rxq->rxq_lock)
   6575 			mutex_obj_free(rxq->rxq_lock);
   6576 	}
   6577 
   6578 	for (i = 0; i < sc->sc_nqueues; i++) {
   6579 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6580 		struct mbuf *m;
   6581 #ifdef WM_EVENT_COUNTERS
   6582 		int j;
   6583 
   6584 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6585 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6586 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6587 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6588 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6589 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6590 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6591 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6592 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6593 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6594 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6595 
   6596 		for (j = 0; j < WM_NTXSEGS; j++)
   6597 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6598 
   6599 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6600 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6601 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6602 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6603 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6604 #endif /* WM_EVENT_COUNTERS */
   6605 
   6606 		/* drain txq_interq */
   6607 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6608 			m_freem(m);
   6609 		pcq_destroy(txq->txq_interq);
   6610 
   6611 		wm_free_tx_buffer(sc, txq);
   6612 		wm_free_tx_descs(sc, txq);
   6613 		if (txq->txq_lock)
   6614 			mutex_obj_free(txq->txq_lock);
   6615 	}
   6616 
   6617 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6618 }
   6619 
   6620 static void
   6621 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6622 {
   6623 
   6624 	KASSERT(mutex_owned(txq->txq_lock));
   6625 
   6626 	/* Initialize the transmit descriptor ring. */
   6627 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6628 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6629 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6630 	txq->txq_free = WM_NTXDESC(txq);
   6631 	txq->txq_next = 0;
   6632 }
   6633 
   6634 static void
   6635 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6636     struct wm_txqueue *txq)
   6637 {
   6638 
   6639 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6640 		device_xname(sc->sc_dev), __func__));
   6641 	KASSERT(mutex_owned(txq->txq_lock));
   6642 
   6643 	if (sc->sc_type < WM_T_82543) {
   6644 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6645 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6646 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6647 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6648 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6649 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6650 	} else {
   6651 		int qid = wmq->wmq_id;
   6652 
   6653 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6654 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6655 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6656 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6657 
   6658 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6659 			/*
   6660 			 * Don't write TDT before TCTL.EN is set.
    6661 			 * See the datasheet.
   6662 			 */
   6663 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6664 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6665 			    | TXDCTL_WTHRESH(0));
   6666 		else {
   6667 			/* XXX should update with AIM? */
   6668 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6669 			if (sc->sc_type >= WM_T_82540) {
   6670 				/* should be same */
   6671 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6672 			}
   6673 
   6674 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6675 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6676 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6677 		}
   6678 	}
   6679 }
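
         /*
          * On the "wmq_itr / 4" scaling above (a sketch of the arithmetic,
          * assuming wmq_itr is kept in the ITR register's 256 ns units while
          * TIDV/TADV, like RDTR/RADV on the Rx side, count in 1.024 us
          * units, as the datasheets describe):
          *
          *	wmq_itr = 200  ->  200 * 256 ns ~= 51.2 us
          *	TIDV = 200 / 4 = 50 ticks  ->  50 * 1.024 us ~= 51.2 us
          */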
   6680 
   6681 static void
   6682 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6683 {
   6684 	int i;
   6685 
   6686 	KASSERT(mutex_owned(txq->txq_lock));
   6687 
   6688 	/* Initialize the transmit job descriptors. */
   6689 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6690 		txq->txq_soft[i].txs_mbuf = NULL;
   6691 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6692 	txq->txq_snext = 0;
   6693 	txq->txq_sdirty = 0;
   6694 }
   6695 
   6696 static void
   6697 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6698     struct wm_txqueue *txq)
   6699 {
   6700 
   6701 	KASSERT(mutex_owned(txq->txq_lock));
   6702 
   6703 	/*
   6704 	 * Set up some register offsets that are different between
   6705 	 * the i82542 and the i82543 and later chips.
   6706 	 */
   6707 	if (sc->sc_type < WM_T_82543)
   6708 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6709 	else
   6710 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6711 
   6712 	wm_init_tx_descs(sc, txq);
   6713 	wm_init_tx_regs(sc, wmq, txq);
   6714 	wm_init_tx_buffer(sc, txq);
   6715 
   6716 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6717 	txq->txq_sending = false;
   6718 }
   6719 
   6720 static void
   6721 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6722     struct wm_rxqueue *rxq)
   6723 {
   6724 
   6725 	KASSERT(mutex_owned(rxq->rxq_lock));
   6726 
   6727 	/*
   6728 	 * Initialize the receive descriptor and receive job
   6729 	 * descriptor rings.
   6730 	 */
   6731 	if (sc->sc_type < WM_T_82543) {
   6732 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6733 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6734 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6735 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6736 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6737 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6738 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6739 
   6740 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6741 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6742 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6743 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6744 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6745 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6746 	} else {
   6747 		int qid = wmq->wmq_id;
   6748 
   6749 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6750 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6751 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6752 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6753 
   6754 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6755 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    6756 				panic("%s: MCLBYTES %d unsupported for "
         				    "82575 or higher\n", __func__, MCLBYTES);
   6757 
   6758 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6759 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6760 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6761 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6762 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6763 			    | RXDCTL_WTHRESH(1));
   6764 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6765 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6766 		} else {
   6767 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6768 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6769 			/* XXX should update with AIM? */
   6770 			CSR_WRITE(sc, WMREG_RDTR,
   6771 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   6772 			/* MUST be same */
   6773 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6774 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6775 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6776 		}
   6777 	}
   6778 }
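
         /*
          * A quick worked example for the SRRCTL write above, assuming
          * SRRCTL_BSIZEPKT_SHIFT is 10 (i.e. the buffer-size field counts
          * in 1 KB units) and the usual MCLBYTES of 2048:
          *
          *	MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT = 2048 >> 10 = 2  (2 KB)
          *
          * The panic fires only when MCLBYTES is not a multiple of 1 KB,
          * which is what the ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1) mask tests.
          */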
   6779 
   6780 static int
   6781 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6782 {
   6783 	struct wm_rxsoft *rxs;
   6784 	int error, i;
   6785 
   6786 	KASSERT(mutex_owned(rxq->rxq_lock));
   6787 
   6788 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6789 		rxs = &rxq->rxq_soft[i];
   6790 		if (rxs->rxs_mbuf == NULL) {
   6791 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6792 				log(LOG_ERR, "%s: unable to allocate or map "
   6793 				    "rx buffer %d, error = %d\n",
   6794 				    device_xname(sc->sc_dev), i, error);
   6795 				/*
   6796 				 * XXX Should attempt to run with fewer receive
   6797 				 * XXX buffers instead of just failing.
   6798 				 */
   6799 				wm_rxdrain(rxq);
   6800 				return ENOMEM;
   6801 			}
   6802 		} else {
   6803 			/*
    6804 			 * For 82575 and 82576, the RX descriptors must be
    6805 			 * initialized after RCTL.EN is set in
    6806 			 * wm_set_filter().
   6807 			 */
   6808 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6809 				wm_init_rxdesc(rxq, i);
   6810 		}
   6811 	}
   6812 	rxq->rxq_ptr = 0;
   6813 	rxq->rxq_discard = 0;
   6814 	WM_RXCHAIN_RESET(rxq);
   6815 
   6816 	return 0;
   6817 }
   6818 
   6819 static int
   6820 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6821     struct wm_rxqueue *rxq)
   6822 {
   6823 
   6824 	KASSERT(mutex_owned(rxq->rxq_lock));
   6825 
   6826 	/*
   6827 	 * Set up some register offsets that are different between
   6828 	 * the i82542 and the i82543 and later chips.
   6829 	 */
   6830 	if (sc->sc_type < WM_T_82543)
   6831 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6832 	else
   6833 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6834 
   6835 	wm_init_rx_regs(sc, wmq, rxq);
   6836 	return wm_init_rx_buffer(sc, rxq);
   6837 }
   6838 
   6839 /*
    6840  * wm_init_txrx_queues:
    6841  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   6842  */
   6843 static int
   6844 wm_init_txrx_queues(struct wm_softc *sc)
   6845 {
   6846 	int i, error = 0;
   6847 
   6848 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6849 		device_xname(sc->sc_dev), __func__));
   6850 
   6851 	for (i = 0; i < sc->sc_nqueues; i++) {
   6852 		struct wm_queue *wmq = &sc->sc_queue[i];
   6853 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6854 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6855 
   6856 		/*
   6857 		 * TODO
    6858 		 * Currently, we use a constant value instead of AIM.
    6859 		 * Furthermore, the interrupt interval of multiqueue,
    6860 		 * which uses polling mode, is shorter than the default
    6861 		 * value.  More tuning, and AIM itself, are required.
   6862 		 */
   6863 		if (wm_is_using_multiqueue(sc))
   6864 			wmq->wmq_itr = 50;
   6865 		else
   6866 			wmq->wmq_itr = sc->sc_itr_init;
   6867 		wmq->wmq_set_itr = true;
   6868 
   6869 		mutex_enter(txq->txq_lock);
   6870 		wm_init_tx_queue(sc, wmq, txq);
   6871 		mutex_exit(txq->txq_lock);
   6872 
   6873 		mutex_enter(rxq->rxq_lock);
   6874 		error = wm_init_rx_queue(sc, wmq, rxq);
   6875 		mutex_exit(rxq->rxq_lock);
   6876 		if (error)
   6877 			break;
   6878 	}
   6879 
   6880 	return error;
   6881 }
   6882 
   6883 /*
   6884  * wm_tx_offload:
   6885  *
   6886  *	Set up TCP/IP checksumming parameters for the
   6887  *	specified packet.
   6888  */
   6889 static int
   6890 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6891     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6892 {
   6893 	struct mbuf *m0 = txs->txs_mbuf;
   6894 	struct livengood_tcpip_ctxdesc *t;
   6895 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6896 	uint32_t ipcse;
   6897 	struct ether_header *eh;
   6898 	int offset, iphl;
   6899 	uint8_t fields;
   6900 
   6901 	/*
   6902 	 * XXX It would be nice if the mbuf pkthdr had offset
   6903 	 * fields for the protocol headers.
   6904 	 */
   6905 
   6906 	eh = mtod(m0, struct ether_header *);
   6907 	switch (htons(eh->ether_type)) {
   6908 	case ETHERTYPE_IP:
   6909 	case ETHERTYPE_IPV6:
   6910 		offset = ETHER_HDR_LEN;
   6911 		break;
   6912 
   6913 	case ETHERTYPE_VLAN:
   6914 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6915 		break;
   6916 
   6917 	default:
   6918 		/*
   6919 		 * Don't support this protocol or encapsulation.
   6920 		 */
   6921 		*fieldsp = 0;
   6922 		*cmdp = 0;
   6923 		return 0;
   6924 	}
   6925 
   6926 	if ((m0->m_pkthdr.csum_flags &
   6927 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6928 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6929 	} else {
   6930 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   6931 	}
   6932 	ipcse = offset + iphl - 1;
   6933 
   6934 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6935 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6936 	seg = 0;
   6937 	fields = 0;
   6938 
   6939 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6940 		int hlen = offset + iphl;
   6941 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6942 
   6943 		if (__predict_false(m0->m_len <
   6944 				    (hlen + sizeof(struct tcphdr)))) {
   6945 			/*
   6946 			 * TCP/IP headers are not in the first mbuf; we need
   6947 			 * to do this the slow and painful way. Let's just
   6948 			 * hope this doesn't happen very often.
   6949 			 */
   6950 			struct tcphdr th;
   6951 
   6952 			WM_Q_EVCNT_INCR(txq, tsopain);
   6953 
   6954 			m_copydata(m0, hlen, sizeof(th), &th);
   6955 			if (v4) {
   6956 				struct ip ip;
   6957 
   6958 				m_copydata(m0, offset, sizeof(ip), &ip);
   6959 				ip.ip_len = 0;
   6960 				m_copyback(m0,
   6961 				    offset + offsetof(struct ip, ip_len),
   6962 				    sizeof(ip.ip_len), &ip.ip_len);
   6963 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6964 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6965 			} else {
   6966 				struct ip6_hdr ip6;
   6967 
   6968 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6969 				ip6.ip6_plen = 0;
   6970 				m_copyback(m0,
   6971 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6972 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6973 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6974 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6975 			}
   6976 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6977 			    sizeof(th.th_sum), &th.th_sum);
   6978 
   6979 			hlen += th.th_off << 2;
   6980 		} else {
   6981 			/*
   6982 			 * TCP/IP headers are in the first mbuf; we can do
   6983 			 * this the easy way.
   6984 			 */
   6985 			struct tcphdr *th;
   6986 
   6987 			if (v4) {
   6988 				struct ip *ip =
   6989 				    (void *)(mtod(m0, char *) + offset);
   6990 				th = (void *)(mtod(m0, char *) + hlen);
   6991 
   6992 				ip->ip_len = 0;
   6993 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6994 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6995 			} else {
   6996 				struct ip6_hdr *ip6 =
   6997 				    (void *)(mtod(m0, char *) + offset);
   6998 				th = (void *)(mtod(m0, char *) + hlen);
   6999 
   7000 				ip6->ip6_plen = 0;
   7001 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7002 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7003 			}
   7004 			hlen += th->th_off << 2;
   7005 		}
   7006 
   7007 		if (v4) {
   7008 			WM_Q_EVCNT_INCR(txq, tso);
   7009 			cmdlen |= WTX_TCPIP_CMD_IP;
   7010 		} else {
   7011 			WM_Q_EVCNT_INCR(txq, tso6);
   7012 			ipcse = 0;
   7013 		}
   7014 		cmd |= WTX_TCPIP_CMD_TSE;
   7015 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7016 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7017 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7018 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7019 	}
   7020 
   7021 	/*
   7022 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7023 	 * offload feature, if we load the context descriptor, we
   7024 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7025 	 */
   7026 
   7027 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7028 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7029 	    WTX_TCPIP_IPCSE(ipcse);
   7030 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7031 		WM_Q_EVCNT_INCR(txq, ipsum);
   7032 		fields |= WTX_IXSM;
   7033 	}
   7034 
   7035 	offset += iphl;
   7036 
   7037 	if (m0->m_pkthdr.csum_flags &
   7038 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7039 		WM_Q_EVCNT_INCR(txq, tusum);
   7040 		fields |= WTX_TXSM;
   7041 		tucs = WTX_TCPIP_TUCSS(offset) |
   7042 		    WTX_TCPIP_TUCSO(offset +
   7043 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7044 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7045 	} else if ((m0->m_pkthdr.csum_flags &
   7046 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7047 		WM_Q_EVCNT_INCR(txq, tusum6);
   7048 		fields |= WTX_TXSM;
   7049 		tucs = WTX_TCPIP_TUCSS(offset) |
   7050 		    WTX_TCPIP_TUCSO(offset +
   7051 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7052 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7053 	} else {
   7054 		/* Just initialize it to a valid TCP context. */
   7055 		tucs = WTX_TCPIP_TUCSS(offset) |
   7056 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7057 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7058 	}
   7059 
   7060 	/*
    7061 	 * We don't have to write a context descriptor for every packet,
    7062 	 * except on the 82574: there, a context descriptor must be written
    7063 	 * for every packet when two descriptor queues are used.
    7064 	 * Writing a context descriptor for every packet adds overhead,
    7065 	 * but it does not cause problems.
   7066 	 */
   7067 	/* Fill in the context descriptor. */
   7068 	t = (struct livengood_tcpip_ctxdesc *)
   7069 	    &txq->txq_descs[txq->txq_next];
   7070 	t->tcpip_ipcs = htole32(ipcs);
   7071 	t->tcpip_tucs = htole32(tucs);
   7072 	t->tcpip_cmdlen = htole32(cmdlen);
   7073 	t->tcpip_seg = htole32(seg);
   7074 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7075 
   7076 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7077 	txs->txs_ndesc++;
   7078 
   7079 	*cmdp = cmd;
   7080 	*fieldsp = fields;
   7081 
   7082 	return 0;
   7083 }
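
         /*
          * A worked example of the offsets wm_tx_offload() computes, for a
          * plain (untagged) IPv4/TCP frame with a 20-byte IP header:
          *
          *	offset = ETHER_HDR_LEN = 14, iphl = 20
          *	IPCSS = 14 (start of the IP header)
          *	IPCSO = 14 + offsetof(struct ip, ip_sum) = 24
          *	IPCSE = 14 + 20 - 1 = 33 (inclusive end)
          *	TUCSS = 34 (start of the TCP header)
          *	TUCSO = 34 + offsetof(struct tcphdr, th_sum) = 50
          *	TUCSE = 0 (checksum to the end of the packet)
          */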
   7084 
   7085 static inline int
   7086 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7087 {
   7088 	struct wm_softc *sc = ifp->if_softc;
   7089 	u_int cpuid = cpu_index(curcpu());
   7090 
   7091 	/*
    7092 	 * Currently, a simple distribution strategy.
    7093 	 * TODO:
    7094 	 * Distribute by flowid (RSS hash value).
   7095 	 */
   7096 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7097 }
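
         /*
          * Example of the mapping above (a sketch, assuming ncpu = 8,
          * sc_nqueues = 4 and sc_affinity_offset = 2):
          *
          *	cpuid 2 -> (2 + 8 - 2) % 4 = queue 0
          *	cpuid 3 -> (3 + 8 - 2) % 4 = queue 1
          *	cpuid 6 -> (6 + 8 - 2) % 4 = queue 0
          *
          * Adding ncpu before the modulo keeps the left-hand side
          * non-negative when cpuid < sc_affinity_offset.
          */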
   7098 
   7099 /*
   7100  * wm_start:		[ifnet interface function]
   7101  *
   7102  *	Start packet transmission on the interface.
   7103  */
   7104 static void
   7105 wm_start(struct ifnet *ifp)
   7106 {
   7107 	struct wm_softc *sc = ifp->if_softc;
   7108 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7109 
   7110 #ifdef WM_MPSAFE
   7111 	KASSERT(if_is_mpsafe(ifp));
   7112 #endif
   7113 	/*
   7114 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7115 	 */
   7116 
   7117 	mutex_enter(txq->txq_lock);
   7118 	if (!txq->txq_stopping)
   7119 		wm_start_locked(ifp);
   7120 	mutex_exit(txq->txq_lock);
   7121 }
   7122 
   7123 static void
   7124 wm_start_locked(struct ifnet *ifp)
   7125 {
   7126 	struct wm_softc *sc = ifp->if_softc;
   7127 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7128 
   7129 	wm_send_common_locked(ifp, txq, false);
   7130 }
   7131 
   7132 static int
   7133 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7134 {
   7135 	int qid;
   7136 	struct wm_softc *sc = ifp->if_softc;
   7137 	struct wm_txqueue *txq;
   7138 
   7139 	qid = wm_select_txqueue(ifp, m);
   7140 	txq = &sc->sc_queue[qid].wmq_txq;
   7141 
   7142 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7143 		m_freem(m);
   7144 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7145 		return ENOBUFS;
   7146 	}
   7147 
   7148 	/*
   7149 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7150 	 */
   7151 	ifp->if_obytes += m->m_pkthdr.len;
   7152 	if (m->m_flags & M_MCAST)
   7153 		ifp->if_omcasts++;
   7154 
   7155 	if (mutex_tryenter(txq->txq_lock)) {
   7156 		if (!txq->txq_stopping)
   7157 			wm_transmit_locked(ifp, txq);
   7158 		mutex_exit(txq->txq_lock);
   7159 	}
   7160 
   7161 	return 0;
   7162 }
   7163 
   7164 static void
   7165 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7166 {
   7167 
   7168 	wm_send_common_locked(ifp, txq, true);
   7169 }
   7170 
   7171 static void
   7172 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7173     bool is_transmit)
   7174 {
   7175 	struct wm_softc *sc = ifp->if_softc;
   7176 	struct mbuf *m0;
   7177 	struct wm_txsoft *txs;
   7178 	bus_dmamap_t dmamap;
   7179 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7180 	bus_addr_t curaddr;
   7181 	bus_size_t seglen, curlen;
   7182 	uint32_t cksumcmd;
   7183 	uint8_t cksumfields;
   7184 	bool remap = true;
   7185 
   7186 	KASSERT(mutex_owned(txq->txq_lock));
   7187 
   7188 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7189 		return;
   7190 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7191 		return;
   7192 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7193 		return;
   7194 
   7195 	/* Remember the previous number of free descriptors. */
   7196 	ofree = txq->txq_free;
   7197 
   7198 	/*
   7199 	 * Loop through the send queue, setting up transmit descriptors
   7200 	 * until we drain the queue, or use up all available transmit
   7201 	 * descriptors.
   7202 	 */
   7203 	for (;;) {
   7204 		m0 = NULL;
   7205 
   7206 		/* Get a work queue entry. */
   7207 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7208 			wm_txeof(txq, UINT_MAX);
   7209 			if (txq->txq_sfree == 0) {
   7210 				DPRINTF(WM_DEBUG_TX,
   7211 				    ("%s: TX: no free job descriptors\n",
   7212 					device_xname(sc->sc_dev)));
   7213 				WM_Q_EVCNT_INCR(txq, txsstall);
   7214 				break;
   7215 			}
   7216 		}
   7217 
   7218 		/* Grab a packet off the queue. */
   7219 		if (is_transmit)
   7220 			m0 = pcq_get(txq->txq_interq);
   7221 		else
   7222 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7223 		if (m0 == NULL)
   7224 			break;
   7225 
   7226 		DPRINTF(WM_DEBUG_TX,
   7227 		    ("%s: TX: have packet to transmit: %p\n",
   7228 			device_xname(sc->sc_dev), m0));
   7229 
   7230 		txs = &txq->txq_soft[txq->txq_snext];
   7231 		dmamap = txs->txs_dmamap;
   7232 
   7233 		use_tso = (m0->m_pkthdr.csum_flags &
   7234 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7235 
   7236 		/*
   7237 		 * So says the Linux driver:
   7238 		 * The controller does a simple calculation to make sure
   7239 		 * there is enough room in the FIFO before initiating the
   7240 		 * DMA for each buffer. The calc is:
   7241 		 *	4 = ceil(buffer len / MSS)
   7242 		 * To make sure we don't overrun the FIFO, adjust the max
   7243 		 * buffer len if the MSS drops.
   7244 		 */
   7245 		dmamap->dm_maxsegsz =
   7246 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7247 		    ? m0->m_pkthdr.segsz << 2
   7248 		    : WTX_MAX_LEN;
   7249 
   7250 		/*
   7251 		 * Load the DMA map.  If this fails, the packet either
   7252 		 * didn't fit in the allotted number of segments, or we
   7253 		 * were short on resources.  For the too-many-segments
   7254 		 * case, we simply report an error and drop the packet,
   7255 		 * since we can't sanely copy a jumbo packet to a single
   7256 		 * buffer.
   7257 		 */
   7258 retry:
   7259 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7260 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7261 		if (__predict_false(error)) {
   7262 			if (error == EFBIG) {
   7263 				if (remap == true) {
   7264 					struct mbuf *m;
   7265 
   7266 					remap = false;
   7267 					m = m_defrag(m0, M_NOWAIT);
   7268 					if (m != NULL) {
   7269 						WM_Q_EVCNT_INCR(txq, defrag);
   7270 						m0 = m;
   7271 						goto retry;
   7272 					}
   7273 				}
   7274 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7275 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7276 				    "DMA segments, dropping...\n",
   7277 				    device_xname(sc->sc_dev));
   7278 				wm_dump_mbuf_chain(sc, m0);
   7279 				m_freem(m0);
   7280 				continue;
   7281 			}
    7282 			/* Short on resources, just stop for now. */
   7283 			DPRINTF(WM_DEBUG_TX,
   7284 			    ("%s: TX: dmamap load failed: %d\n",
   7285 				device_xname(sc->sc_dev), error));
   7286 			break;
   7287 		}
   7288 
   7289 		segs_needed = dmamap->dm_nsegs;
   7290 		if (use_tso) {
   7291 			/* For sentinel descriptor; see below. */
   7292 			segs_needed++;
   7293 		}
   7294 
   7295 		/*
   7296 		 * Ensure we have enough descriptors free to describe
   7297 		 * the packet. Note, we always reserve one descriptor
   7298 		 * at the end of the ring due to the semantics of the
   7299 		 * TDT register, plus one more in the event we need
   7300 		 * to load offload context.
   7301 		 */
   7302 		if (segs_needed > txq->txq_free - 2) {
   7303 			/*
   7304 			 * Not enough free descriptors to transmit this
   7305 			 * packet.  We haven't committed anything yet,
   7306 			 * so just unload the DMA map, put the packet
    7307 			 * back on the queue, and punt. Notify the upper
   7308 			 * layer that there are no more slots left.
   7309 			 */
   7310 			DPRINTF(WM_DEBUG_TX,
   7311 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7312 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7313 				segs_needed, txq->txq_free - 1));
   7314 			if (!is_transmit)
   7315 				ifp->if_flags |= IFF_OACTIVE;
   7316 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7317 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7318 			WM_Q_EVCNT_INCR(txq, txdstall);
   7319 			break;
   7320 		}
   7321 
   7322 		/*
   7323 		 * Check for 82547 Tx FIFO bug. We need to do this
   7324 		 * once we know we can transmit the packet, since we
   7325 		 * do some internal FIFO space accounting here.
   7326 		 */
   7327 		if (sc->sc_type == WM_T_82547 &&
   7328 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7329 			DPRINTF(WM_DEBUG_TX,
   7330 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7331 				device_xname(sc->sc_dev)));
   7332 			if (!is_transmit)
   7333 				ifp->if_flags |= IFF_OACTIVE;
   7334 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7335 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7336 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7337 			break;
   7338 		}
   7339 
   7340 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7341 
   7342 		DPRINTF(WM_DEBUG_TX,
   7343 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7344 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7345 
   7346 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7347 
   7348 		/*
   7349 		 * Store a pointer to the packet so that we can free it
   7350 		 * later.
   7351 		 *
   7352 		 * Initially, we consider the number of descriptors the
   7353 		 * packet uses the number of DMA segments.  This may be
   7354 		 * incremented by 1 if we do checksum offload (a descriptor
   7355 		 * is used to set the checksum context).
   7356 		 */
   7357 		txs->txs_mbuf = m0;
   7358 		txs->txs_firstdesc = txq->txq_next;
   7359 		txs->txs_ndesc = segs_needed;
   7360 
   7361 		/* Set up offload parameters for this packet. */
   7362 		if (m0->m_pkthdr.csum_flags &
   7363 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7364 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7365 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7366 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7367 					  &cksumfields) != 0) {
   7368 				/* Error message already displayed. */
   7369 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7370 				continue;
   7371 			}
   7372 		} else {
   7373 			cksumcmd = 0;
   7374 			cksumfields = 0;
   7375 		}
   7376 
   7377 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7378 
   7379 		/* Sync the DMA map. */
   7380 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7381 		    BUS_DMASYNC_PREWRITE);
   7382 
   7383 		/* Initialize the transmit descriptor. */
   7384 		for (nexttx = txq->txq_next, seg = 0;
   7385 		     seg < dmamap->dm_nsegs; seg++) {
   7386 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7387 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7388 			     seglen != 0;
   7389 			     curaddr += curlen, seglen -= curlen,
   7390 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7391 				curlen = seglen;
   7392 
   7393 				/*
   7394 				 * So says the Linux driver:
   7395 				 * Work around for premature descriptor
   7396 				 * write-backs in TSO mode.  Append a
   7397 				 * 4-byte sentinel descriptor.
   7398 				 */
   7399 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7400 				    curlen > 8)
   7401 					curlen -= 4;
   7402 
   7403 				wm_set_dma_addr(
   7404 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7405 				txq->txq_descs[nexttx].wtx_cmdlen
   7406 				    = htole32(cksumcmd | curlen);
   7407 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7408 				    = 0;
   7409 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7410 				    = cksumfields;
    7411 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7412 				lasttx = nexttx;
   7413 
   7414 				DPRINTF(WM_DEBUG_TX,
   7415 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7416 					"len %#04zx\n",
   7417 					device_xname(sc->sc_dev), nexttx,
   7418 					(uint64_t)curaddr, curlen));
   7419 			}
   7420 		}
   7421 
   7422 		KASSERT(lasttx != -1);
   7423 
   7424 		/*
   7425 		 * Set up the command byte on the last descriptor of
   7426 		 * the packet. If we're in the interrupt delay window,
   7427 		 * delay the interrupt.
   7428 		 */
   7429 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7430 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7431 
   7432 		/*
   7433 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7434 		 * up the descriptor to encapsulate the packet for us.
   7435 		 *
   7436 		 * This is only valid on the last descriptor of the packet.
   7437 		 */
   7438 		if (vlan_has_tag(m0)) {
   7439 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7440 			    htole32(WTX_CMD_VLE);
   7441 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7442 			    = htole16(vlan_get_tag(m0));
   7443 		}
   7444 
   7445 		txs->txs_lastdesc = lasttx;
   7446 
   7447 		DPRINTF(WM_DEBUG_TX,
   7448 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7449 			device_xname(sc->sc_dev),
   7450 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7451 
   7452 		/* Sync the descriptors we're using. */
   7453 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7454 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7455 
   7456 		/* Give the packet to the chip. */
   7457 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7458 
   7459 		DPRINTF(WM_DEBUG_TX,
   7460 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7461 
   7462 		DPRINTF(WM_DEBUG_TX,
   7463 		    ("%s: TX: finished transmitting packet, job %d\n",
   7464 			device_xname(sc->sc_dev), txq->txq_snext));
   7465 
   7466 		/* Advance the tx pointer. */
   7467 		txq->txq_free -= txs->txs_ndesc;
   7468 		txq->txq_next = nexttx;
   7469 
   7470 		txq->txq_sfree--;
   7471 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7472 
   7473 		/* Pass the packet to any BPF listeners. */
   7474 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7475 	}
   7476 
   7477 	if (m0 != NULL) {
   7478 		if (!is_transmit)
   7479 			ifp->if_flags |= IFF_OACTIVE;
   7480 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7481 		WM_Q_EVCNT_INCR(txq, descdrop);
   7482 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7483 			__func__));
   7484 		m_freem(m0);
   7485 	}
   7486 
   7487 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7488 		/* No more slots; notify upper layer. */
   7489 		if (!is_transmit)
   7490 			ifp->if_flags |= IFF_OACTIVE;
   7491 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7492 	}
   7493 
   7494 	if (txq->txq_free != ofree) {
   7495 		/* Set a watchdog timer in case the chip flakes out. */
   7496 		txq->txq_lastsent = time_uptime;
   7497 		txq->txq_sending = true;
   7498 	}
   7499 }
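
         /*
          * On the TSO sentinel handled above: trimming 4 bytes from the
          * last segment leaves seglen != 0, so the inner loop runs once
          * more and emits a trailing 4-byte descriptor -- the extra slot
          * reserved by the segs_needed++ earlier.  For example, a final
          * 1448-byte segment becomes a 1444-byte descriptor followed by a
          * 4-byte sentinel descriptor.
          */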
   7500 
   7501 /*
   7502  * wm_nq_tx_offload:
   7503  *
   7504  *	Set up TCP/IP checksumming parameters for the
   7505  *	specified packet, for NEWQUEUE devices
   7506  */
   7507 static int
   7508 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7509     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7510 {
   7511 	struct mbuf *m0 = txs->txs_mbuf;
   7512 	uint32_t vl_len, mssidx, cmdc;
   7513 	struct ether_header *eh;
   7514 	int offset, iphl;
   7515 
   7516 	/*
   7517 	 * XXX It would be nice if the mbuf pkthdr had offset
   7518 	 * fields for the protocol headers.
   7519 	 */
   7520 	*cmdlenp = 0;
   7521 	*fieldsp = 0;
   7522 
   7523 	eh = mtod(m0, struct ether_header *);
   7524 	switch (htons(eh->ether_type)) {
   7525 	case ETHERTYPE_IP:
   7526 	case ETHERTYPE_IPV6:
   7527 		offset = ETHER_HDR_LEN;
   7528 		break;
   7529 
   7530 	case ETHERTYPE_VLAN:
   7531 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7532 		break;
   7533 
   7534 	default:
   7535 		/* Don't support this protocol or encapsulation. */
   7536 		*do_csum = false;
   7537 		return 0;
   7538 	}
   7539 	*do_csum = true;
   7540 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7541 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7542 
   7543 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7544 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7545 
   7546 	if ((m0->m_pkthdr.csum_flags &
   7547 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7548 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7549 	} else {
   7550 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7551 	}
   7552 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7553 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7554 
   7555 	if (vlan_has_tag(m0)) {
   7556 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7557 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7558 		*cmdlenp |= NQTX_CMD_VLE;
   7559 	}
   7560 
   7561 	mssidx = 0;
   7562 
   7563 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7564 		int hlen = offset + iphl;
   7565 		int tcp_hlen;
   7566 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7567 
   7568 		if (__predict_false(m0->m_len <
   7569 				    (hlen + sizeof(struct tcphdr)))) {
   7570 			/*
   7571 			 * TCP/IP headers are not in the first mbuf; we need
   7572 			 * to do this the slow and painful way. Let's just
   7573 			 * hope this doesn't happen very often.
   7574 			 */
   7575 			struct tcphdr th;
   7576 
   7577 			WM_Q_EVCNT_INCR(txq, tsopain);
   7578 
   7579 			m_copydata(m0, hlen, sizeof(th), &th);
   7580 			if (v4) {
   7581 				struct ip ip;
   7582 
   7583 				m_copydata(m0, offset, sizeof(ip), &ip);
   7584 				ip.ip_len = 0;
   7585 				m_copyback(m0,
   7586 				    offset + offsetof(struct ip, ip_len),
   7587 				    sizeof(ip.ip_len), &ip.ip_len);
   7588 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7589 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7590 			} else {
   7591 				struct ip6_hdr ip6;
   7592 
   7593 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7594 				ip6.ip6_plen = 0;
   7595 				m_copyback(m0,
   7596 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7597 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7598 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7599 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7600 			}
   7601 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7602 			    sizeof(th.th_sum), &th.th_sum);
   7603 
   7604 			tcp_hlen = th.th_off << 2;
   7605 		} else {
   7606 			/*
   7607 			 * TCP/IP headers are in the first mbuf; we can do
   7608 			 * this the easy way.
   7609 			 */
   7610 			struct tcphdr *th;
   7611 
   7612 			if (v4) {
   7613 				struct ip *ip =
   7614 				    (void *)(mtod(m0, char *) + offset);
   7615 				th = (void *)(mtod(m0, char *) + hlen);
   7616 
   7617 				ip->ip_len = 0;
   7618 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7619 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7620 			} else {
   7621 				struct ip6_hdr *ip6 =
   7622 				    (void *)(mtod(m0, char *) + offset);
   7623 				th = (void *)(mtod(m0, char *) + hlen);
   7624 
   7625 				ip6->ip6_plen = 0;
   7626 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7627 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7628 			}
   7629 			tcp_hlen = th->th_off << 2;
   7630 		}
   7631 		hlen += tcp_hlen;
   7632 		*cmdlenp |= NQTX_CMD_TSE;
   7633 
   7634 		if (v4) {
   7635 			WM_Q_EVCNT_INCR(txq, tso);
   7636 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7637 		} else {
   7638 			WM_Q_EVCNT_INCR(txq, tso6);
   7639 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7640 		}
    7641 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    7642 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7643 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7644 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7645 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7646 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7647 	} else {
   7648 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7649 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7650 	}
   7651 
   7652 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7653 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7654 		cmdc |= NQTXC_CMD_IP4;
   7655 	}
   7656 
   7657 	if (m0->m_pkthdr.csum_flags &
   7658 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7659 		WM_Q_EVCNT_INCR(txq, tusum);
   7660 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7661 			cmdc |= NQTXC_CMD_TCP;
   7662 		} else {
   7663 			cmdc |= NQTXC_CMD_UDP;
   7664 		}
   7665 		cmdc |= NQTXC_CMD_IP4;
   7666 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7667 	}
   7668 	if (m0->m_pkthdr.csum_flags &
   7669 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7670 		WM_Q_EVCNT_INCR(txq, tusum6);
   7671 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7672 			cmdc |= NQTXC_CMD_TCP;
   7673 		} else {
   7674 			cmdc |= NQTXC_CMD_UDP;
   7675 		}
   7676 		cmdc |= NQTXC_CMD_IP6;
   7677 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7678 	}
   7679 
   7680 	/*
    7681 	 * We don't have to write a context descriptor for every packet on
    7682 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7683 	 * I354, I210 and I211.  Writing one per Tx queue is enough for
    7684 	 * these controllers.
    7685 	 * Writing a context descriptor for every packet adds overhead,
    7686 	 * but it does not cause problems.
   7687 	 */
   7688 	/* Fill in the context descriptor. */
    7689 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
    7690 	    htole32(vl_len);
    7691 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
    7692 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
    7693 	    htole32(cmdc);
    7694 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
    7695 	    htole32(mssidx);
   7696 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7697 	DPRINTF(WM_DEBUG_TX,
   7698 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7699 		txq->txq_next, 0, vl_len));
   7700 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7701 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7702 	txs->txs_ndesc++;
   7703 	return 0;
   7704 }
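
         /*
          * A worked example of the context packing above, for an untagged
          * IPv4/TCP checksum-offload (non-TSO) packet with a 20-byte IP
          * header (a sketch; shift and flag names as used above):
          *
          *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
          *	       | (20 << NQTXC_VLLEN_IPLEN_SHIFT)
          *	cmdc   = NQTX_DTYP_C | NQTX_CMD_DEXT
          *	       | NQTXC_CMD_TCP | NQTXC_CMD_IP4
          *	fields = NQTXD_FIELDS_TUXSM
          *	       | (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT)
          *
          * mssidx stays 0 in the non-TSO case.
          */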
   7705 
   7706 /*
   7707  * wm_nq_start:		[ifnet interface function]
   7708  *
   7709  *	Start packet transmission on the interface for NEWQUEUE devices
   7710  */
   7711 static void
   7712 wm_nq_start(struct ifnet *ifp)
   7713 {
   7714 	struct wm_softc *sc = ifp->if_softc;
   7715 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7716 
   7717 #ifdef WM_MPSAFE
   7718 	KASSERT(if_is_mpsafe(ifp));
   7719 #endif
   7720 	/*
   7721 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7722 	 */
   7723 
   7724 	mutex_enter(txq->txq_lock);
   7725 	if (!txq->txq_stopping)
   7726 		wm_nq_start_locked(ifp);
   7727 	mutex_exit(txq->txq_lock);
   7728 }
   7729 
   7730 static void
   7731 wm_nq_start_locked(struct ifnet *ifp)
   7732 {
   7733 	struct wm_softc *sc = ifp->if_softc;
   7734 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7735 
   7736 	wm_nq_send_common_locked(ifp, txq, false);
   7737 }
   7738 
   7739 static int
   7740 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7741 {
   7742 	int qid;
   7743 	struct wm_softc *sc = ifp->if_softc;
   7744 	struct wm_txqueue *txq;
   7745 
   7746 	qid = wm_select_txqueue(ifp, m);
   7747 	txq = &sc->sc_queue[qid].wmq_txq;
   7748 
   7749 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7750 		m_freem(m);
   7751 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7752 		return ENOBUFS;
   7753 	}
   7754 
   7755 	/*
   7756 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7757 	 */
   7758 	ifp->if_obytes += m->m_pkthdr.len;
   7759 	if (m->m_flags & M_MCAST)
   7760 		ifp->if_omcasts++;
   7761 
   7762 	/*
    7763 	 * There are two situations in which this mutex_tryenter() can
    7764 	 * fail at run time:
    7765 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7766 	 *     (2) contention with the deferred if_start softint
    7767 	 *         (wm_handle_queue())
    7768 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7769 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7770 	 * In case (2), the last packet enqueued is likewise dequeued by
    7771 	 * wm_deferred_start_locked(), so it does not get stuck, either.
   7772 	 */
   7773 	if (mutex_tryenter(txq->txq_lock)) {
   7774 		if (!txq->txq_stopping)
   7775 			wm_nq_transmit_locked(ifp, txq);
   7776 		mutex_exit(txq->txq_lock);
   7777 	}
   7778 
   7779 	return 0;
   7780 }
   7781 
   7782 static void
   7783 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7784 {
   7785 
   7786 	wm_nq_send_common_locked(ifp, txq, true);
   7787 }
   7788 
   7789 static void
   7790 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7791     bool is_transmit)
   7792 {
   7793 	struct wm_softc *sc = ifp->if_softc;
   7794 	struct mbuf *m0;
   7795 	struct wm_txsoft *txs;
   7796 	bus_dmamap_t dmamap;
   7797 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7798 	bool do_csum, sent;
   7799 	bool remap = true;
   7800 
   7801 	KASSERT(mutex_owned(txq->txq_lock));
   7802 
   7803 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7804 		return;
   7805 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7806 		return;
   7807 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7808 		return;
   7809 
   7810 	sent = false;
   7811 
   7812 	/*
   7813 	 * Loop through the send queue, setting up transmit descriptors
   7814 	 * until we drain the queue, or use up all available transmit
   7815 	 * descriptors.
   7816 	 */
   7817 	for (;;) {
   7818 		m0 = NULL;
   7819 
   7820 		/* Get a work queue entry. */
   7821 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7822 			wm_txeof(txq, UINT_MAX);
   7823 			if (txq->txq_sfree == 0) {
   7824 				DPRINTF(WM_DEBUG_TX,
   7825 				    ("%s: TX: no free job descriptors\n",
   7826 					device_xname(sc->sc_dev)));
   7827 				WM_Q_EVCNT_INCR(txq, txsstall);
   7828 				break;
   7829 			}
   7830 		}
   7831 
   7832 		/* Grab a packet off the queue. */
   7833 		if (is_transmit)
   7834 			m0 = pcq_get(txq->txq_interq);
   7835 		else
   7836 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7837 		if (m0 == NULL)
   7838 			break;
   7839 
   7840 		DPRINTF(WM_DEBUG_TX,
   7841 		    ("%s: TX: have packet to transmit: %p\n",
   7842 		    device_xname(sc->sc_dev), m0));
   7843 
   7844 		txs = &txq->txq_soft[txq->txq_snext];
   7845 		dmamap = txs->txs_dmamap;
   7846 
   7847 		/*
   7848 		 * Load the DMA map.  If this fails, the packet either
   7849 		 * didn't fit in the allotted number of segments, or we
   7850 		 * were short on resources.  For the too-many-segments
   7851 		 * case, we simply report an error and drop the packet,
   7852 		 * since we can't sanely copy a jumbo packet to a single
   7853 		 * buffer.
   7854 		 */
   7855 retry:
   7856 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7857 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7858 		if (__predict_false(error)) {
   7859 			if (error == EFBIG) {
   7860 				if (remap == true) {
   7861 					struct mbuf *m;
   7862 
   7863 					remap = false;
   7864 					m = m_defrag(m0, M_NOWAIT);
   7865 					if (m != NULL) {
   7866 						WM_Q_EVCNT_INCR(txq, defrag);
   7867 						m0 = m;
   7868 						goto retry;
   7869 					}
   7870 				}
   7871 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7872 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7873 				    "DMA segments, dropping...\n",
   7874 				    device_xname(sc->sc_dev));
   7875 				wm_dump_mbuf_chain(sc, m0);
   7876 				m_freem(m0);
   7877 				continue;
   7878 			}
   7879 			/* Short on resources, just stop for now. */
   7880 			DPRINTF(WM_DEBUG_TX,
   7881 			    ("%s: TX: dmamap load failed: %d\n",
   7882 				device_xname(sc->sc_dev), error));
   7883 			break;
   7884 		}
   7885 
   7886 		segs_needed = dmamap->dm_nsegs;
   7887 
   7888 		/*
   7889 		 * Ensure we have enough descriptors free to describe
   7890 		 * the packet. Note, we always reserve one descriptor
   7891 		 * at the end of the ring due to the semantics of the
   7892 		 * TDT register, plus one more in the event we need
   7893 		 * to load offload context.
   7894 		 */
   7895 		if (segs_needed > txq->txq_free - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map and punt; the
			 * packet is dropped by the cleanup code after
			 * the loop.  Notify the upper layer that there
			 * are no more slots left.
			 */
   7903 			DPRINTF(WM_DEBUG_TX,
   7904 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7905 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7906 				segs_needed, txq->txq_free - 1));
   7907 			if (!is_transmit)
   7908 				ifp->if_flags |= IFF_OACTIVE;
   7909 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7910 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7911 			WM_Q_EVCNT_INCR(txq, txdstall);
   7912 			break;
   7913 		}
   7914 
   7915 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7916 
   7917 		DPRINTF(WM_DEBUG_TX,
   7918 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7919 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7920 
   7921 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7922 
   7923 		/*
   7924 		 * Store a pointer to the packet so that we can free it
   7925 		 * later.
   7926 		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may
		 * be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
   7931 		 */
   7932 		txs->txs_mbuf = m0;
   7933 		txs->txs_firstdesc = txq->txq_next;
   7934 		txs->txs_ndesc = segs_needed;
   7935 
   7936 		/* Set up offload parameters for this packet. */
   7937 		uint32_t cmdlen, fields, dcmdlen;
   7938 		if (m0->m_pkthdr.csum_flags &
   7939 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7940 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7941 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7942 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7943 			    &do_csum) != 0) {
   7944 				/* Error message already displayed. */
   7945 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7946 				continue;
   7947 			}
   7948 		} else {
   7949 			do_csum = false;
   7950 			cmdlen = 0;
   7951 			fields = 0;
   7952 		}
   7953 
   7954 		/* Sync the DMA map. */
   7955 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7956 		    BUS_DMASYNC_PREWRITE);
   7957 
   7958 		/* Initialize the first transmit descriptor. */
   7959 		nexttx = txq->txq_next;
   7960 		if (!do_csum) {
			/* Set up a legacy descriptor. */
   7962 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7963 			    dmamap->dm_segs[0].ds_addr);
   7964 			txq->txq_descs[nexttx].wtx_cmdlen =
   7965 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7966 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7967 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7968 			if (vlan_has_tag(m0)) {
   7969 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7970 				    htole32(WTX_CMD_VLE);
   7971 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7972 				    htole16(vlan_get_tag(m0));
   7973 			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7975 			}
   7976 			dcmdlen = 0;
   7977 		} else {
			/* Set up an advanced data descriptor. */
   7979 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7980 			    htole64(dmamap->dm_segs[0].ds_addr);
   7981 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7982 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7984 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7985 			    htole32(fields);
   7986 			DPRINTF(WM_DEBUG_TX,
   7987 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7988 				device_xname(sc->sc_dev), nexttx,
   7989 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   7990 			DPRINTF(WM_DEBUG_TX,
   7991 			    ("\t 0x%08x%08x\n", fields,
   7992 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7993 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7994 		}
   7995 
   7996 		lasttx = nexttx;
   7997 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the remaining descriptors.  The legacy and
		 * advanced formats are identical from here on.
		 */
   8002 		for (seg = 1; seg < dmamap->dm_nsegs;
   8003 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8004 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8005 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8006 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8007 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8008 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8009 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8010 			lasttx = nexttx;
   8011 
   8012 			DPRINTF(WM_DEBUG_TX,
   8013 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8014 				device_xname(sc->sc_dev), nexttx,
   8015 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8016 				dmamap->dm_segs[seg].ds_len));
   8017 		}
   8018 
   8019 		KASSERT(lasttx != -1);
   8020 
		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.  The EOP and RS bits occupy the
		 * same positions in the legacy and advanced formats
		 * (asserted below), so writing the legacy field works
		 * for both.
		 */
   8026 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8027 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8028 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8029 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8030 
   8031 		txs->txs_lastdesc = lasttx;
   8032 
   8033 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8034 		    device_xname(sc->sc_dev),
   8035 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8036 
   8037 		/* Sync the descriptors we're using. */
   8038 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8039 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8040 
   8041 		/* Give the packet to the chip. */
   8042 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8043 		sent = true;
   8044 
   8045 		DPRINTF(WM_DEBUG_TX,
   8046 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8047 
   8048 		DPRINTF(WM_DEBUG_TX,
   8049 		    ("%s: TX: finished transmitting packet, job %d\n",
   8050 			device_xname(sc->sc_dev), txq->txq_snext));
   8051 
   8052 		/* Advance the tx pointer. */
   8053 		txq->txq_free -= txs->txs_ndesc;
   8054 		txq->txq_next = nexttx;
   8055 
   8056 		txq->txq_sfree--;
   8057 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8058 
   8059 		/* Pass the packet to any BPF listeners. */
   8060 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8061 	}
   8062 
   8063 	if (m0 != NULL) {
   8064 		if (!is_transmit)
   8065 			ifp->if_flags |= IFF_OACTIVE;
   8066 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8067 		WM_Q_EVCNT_INCR(txq, descdrop);
   8068 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8069 			__func__));
   8070 		m_freem(m0);
   8071 	}
   8072 
   8073 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8074 		/* No more slots; notify upper layer. */
   8075 		if (!is_transmit)
   8076 			ifp->if_flags |= IFF_OACTIVE;
   8077 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8078 	}
   8079 
   8080 	if (sent) {
   8081 		/* Set a watchdog timer in case the chip flakes out. */
   8082 		txq->txq_lastsent = time_uptime;
   8083 		txq->txq_sending = true;
   8084 	}
   8085 }
   8086 
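/*
 * wm_deferred_start_locked:
 *
 *	Restart transmission from softint context once wm_txeof() has
 *	freed descriptors.  Called with txq_lock held.
 */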
   8087 static void
   8088 wm_deferred_start_locked(struct wm_txqueue *txq)
   8089 {
   8090 	struct wm_softc *sc = txq->txq_sc;
   8091 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8092 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8093 	int qid = wmq->wmq_id;
   8094 
   8095 	KASSERT(mutex_owned(txq->txq_lock));
   8096 
	if (txq->txq_stopping)
		return;
   8101 
   8102 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
   8104 		if (qid == 0)
   8105 			wm_nq_start_locked(ifp);
   8106 		wm_nq_transmit_locked(ifp, txq);
   8107 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
   8109 		if (qid == 0)
   8110 			wm_start_locked(ifp);
   8111 		wm_transmit_locked(ifp, txq);
   8112 	}
   8113 }
   8114 
   8115 /* Interrupt */
   8116 
   8117 /*
   8118  * wm_txeof:
   8119  *
   8120  *	Helper; handle transmit interrupts.
   8121  */
   8122 static bool
   8123 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8124 {
   8125 	struct wm_softc *sc = txq->txq_sc;
   8126 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8127 	struct wm_txsoft *txs;
   8128 	int count = 0;
   8129 	int i;
   8130 	uint8_t status;
   8131 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8132 	bool more = false;
   8133 
   8134 	KASSERT(mutex_owned(txq->txq_lock));
   8135 
   8136 	if (txq->txq_stopping)
   8137 		return false;
   8138 
   8139 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8141 	if (wmq->wmq_id == 0)
   8142 		ifp->if_flags &= ~IFF_OACTIVE;
   8143 
   8144 	/*
   8145 	 * Go through the Tx list and free mbufs for those
   8146 	 * frames which have been transmitted.
   8147 	 */
   8148 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8149 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8150 		if (limit-- == 0) {
   8151 			more = true;
   8152 			DPRINTF(WM_DEBUG_TX,
   8153 			    ("%s: TX: loop limited, job %d is not processed\n",
   8154 				device_xname(sc->sc_dev), i));
   8155 			break;
   8156 		}
   8157 
   8158 		txs = &txq->txq_soft[i];
   8159 
   8160 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8161 			device_xname(sc->sc_dev), i));
   8162 
   8163 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8164 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8165 
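		/*
		 * If the descriptor-done (DD) bit is clear, the chip
		 * still owns this job; sync the last descriptor back
		 * for the next poll and stop here.
		 */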
   8166 		status =
   8167 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8168 		if ((status & WTX_ST_DD) == 0) {
   8169 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8170 			    BUS_DMASYNC_PREREAD);
   8171 			break;
   8172 		}
   8173 
   8174 		count++;
   8175 		DPRINTF(WM_DEBUG_TX,
   8176 		    ("%s: TX: job %d done: descs %d..%d\n",
   8177 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8178 		    txs->txs_lastdesc));
   8179 
   8180 		/*
   8181 		 * XXX We should probably be using the statistics
   8182 		 * XXX registers, but I don't know if they exist
   8183 		 * XXX on chips before the i82544.
   8184 		 */
   8185 
   8186 #ifdef WM_EVENT_COUNTERS
   8187 		if (status & WTX_ST_TU)
   8188 			WM_Q_EVCNT_INCR(txq, underrun);
   8189 #endif /* WM_EVENT_COUNTERS */
   8190 
   8191 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8192 			ifp->if_oerrors++;
   8193 			if (status & WTX_ST_LC)
   8194 				log(LOG_WARNING, "%s: late collision\n",
   8195 				    device_xname(sc->sc_dev));
   8196 			else if (status & WTX_ST_EC) {
   8197 				ifp->if_collisions += 16;
   8198 				log(LOG_WARNING, "%s: excessive collisions\n",
   8199 				    device_xname(sc->sc_dev));
   8200 			}
   8201 		} else
   8202 			ifp->if_opackets++;
   8203 
   8204 		txq->txq_packets++;
   8205 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8206 
   8207 		txq->txq_free += txs->txs_ndesc;
   8208 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8209 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8210 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8211 		m_freem(txs->txs_mbuf);
   8212 		txs->txs_mbuf = NULL;
   8213 	}
   8214 
   8215 	/* Update the dirty transmit buffer pointer. */
   8216 	txq->txq_sdirty = i;
   8217 	DPRINTF(WM_DEBUG_TX,
   8218 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8219 
   8220 	if (count != 0)
   8221 		rnd_add_uint32(&sc->rnd_source, count);
   8222 
   8223 	/*
   8224 	 * If there are no more pending transmissions, cancel the watchdog
   8225 	 * timer.
   8226 	 */
   8227 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8228 		txq->txq_sending = false;
   8229 
   8230 	return more;
   8231 }
   8232 
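/*
 * RX descriptor accessors.  Three layouts are in use: the legacy
 * format, the 82574 extended format and the NEWQUEUE (82575 and newer)
 * advanced format.  Each helper dispatches on sc_type / WM_F_NEWQUEUE
 * so that callers need not know which layout the ring uses.
 */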
   8233 static inline uint32_t
   8234 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8235 {
   8236 	struct wm_softc *sc = rxq->rxq_sc;
   8237 
   8238 	if (sc->sc_type == WM_T_82574)
   8239 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8240 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8241 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8242 	else
   8243 		return rxq->rxq_descs[idx].wrx_status;
   8244 }
   8245 
   8246 static inline uint32_t
   8247 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8248 {
   8249 	struct wm_softc *sc = rxq->rxq_sc;
   8250 
   8251 	if (sc->sc_type == WM_T_82574)
   8252 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8253 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8254 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8255 	else
   8256 		return rxq->rxq_descs[idx].wrx_errors;
   8257 }
   8258 
   8259 static inline uint16_t
   8260 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8261 {
   8262 	struct wm_softc *sc = rxq->rxq_sc;
   8263 
   8264 	if (sc->sc_type == WM_T_82574)
   8265 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8266 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8267 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8268 	else
   8269 		return rxq->rxq_descs[idx].wrx_special;
   8270 }
   8271 
   8272 static inline int
   8273 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8274 {
   8275 	struct wm_softc *sc = rxq->rxq_sc;
   8276 
   8277 	if (sc->sc_type == WM_T_82574)
   8278 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8279 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8280 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8281 	else
   8282 		return rxq->rxq_descs[idx].wrx_len;
   8283 }
   8284 
   8285 #ifdef WM_DEBUG
   8286 static inline uint32_t
   8287 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8288 {
   8289 	struct wm_softc *sc = rxq->rxq_sc;
   8290 
   8291 	if (sc->sc_type == WM_T_82574)
   8292 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8293 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8294 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8295 	else
   8296 		return 0;
   8297 }
   8298 
   8299 static inline uint8_t
   8300 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8301 {
   8302 	struct wm_softc *sc = rxq->rxq_sc;
   8303 
   8304 	if (sc->sc_type == WM_T_82574)
   8305 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8306 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8307 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8308 	else
   8309 		return 0;
   8310 }
   8311 #endif /* WM_DEBUG */
   8312 
   8313 static inline bool
   8314 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8315     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8316 {
   8317 
   8318 	if (sc->sc_type == WM_T_82574)
   8319 		return (status & ext_bit) != 0;
   8320 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8321 		return (status & nq_bit) != 0;
   8322 	else
   8323 		return (status & legacy_bit) != 0;
   8324 }
   8325 
   8326 static inline bool
   8327 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8328     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8329 {
   8330 
   8331 	if (sc->sc_type == WM_T_82574)
   8332 		return (error & ext_bit) != 0;
   8333 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8334 		return (error & nq_bit) != 0;
   8335 	else
   8336 		return (error & legacy_bit) != 0;
   8337 }
   8338 
   8339 static inline bool
   8340 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8341 {
   8342 
   8343 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8344 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8345 		return true;
   8346 	else
   8347 		return false;
   8348 }
   8349 
   8350 static inline bool
   8351 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8352 {
   8353 	struct wm_softc *sc = rxq->rxq_sc;
   8354 
   8355 	/* XXXX missing error bit for newqueue? */
   8356 	if (wm_rxdesc_is_set_error(sc, errors,
   8357 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8358 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8359 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8360 		NQRXC_ERROR_RXE)) {
   8361 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8362 		    EXTRXC_ERROR_SE, 0))
   8363 			log(LOG_WARNING, "%s: symbol error\n",
   8364 			    device_xname(sc->sc_dev));
   8365 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8366 		    EXTRXC_ERROR_SEQ, 0))
   8367 			log(LOG_WARNING, "%s: receive sequence error\n",
   8368 			    device_xname(sc->sc_dev));
   8369 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8370 		    EXTRXC_ERROR_CE, 0))
   8371 			log(LOG_WARNING, "%s: CRC error\n",
   8372 			    device_xname(sc->sc_dev));
   8373 		return true;
   8374 	}
   8375 
   8376 	return false;
   8377 }
   8378 
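/*
 * Check the descriptor-done (DD) bit of an RX descriptor.  If it is
 * clear, re-sync the descriptor for the next poll and report that
 * there is nothing more to process.
 */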
   8379 static inline bool
   8380 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8381 {
   8382 	struct wm_softc *sc = rxq->rxq_sc;
   8383 
   8384 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8385 		NQRXC_STATUS_DD)) {
   8386 		/* We have processed all of the receive descriptors. */
   8387 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8388 		return false;
   8389 	}
   8390 
   8391 	return true;
   8392 }
   8393 
   8394 static inline bool
   8395 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8396     uint16_t vlantag, struct mbuf *m)
   8397 {
   8398 
   8399 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8400 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8401 		vlan_set_tag(m, le16toh(vlantag));
   8402 	}
   8403 
   8404 	return true;
   8405 }
   8406 
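/*
 * Set the mbuf's M_CSUM flags from the descriptor's checksum status
 * and error bits, unless the chip reports that checksum evaluation
 * was skipped (IXSM).
 */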
   8407 static inline void
   8408 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8409     uint32_t errors, struct mbuf *m)
   8410 {
   8411 	struct wm_softc *sc = rxq->rxq_sc;
   8412 
   8413 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8414 		if (wm_rxdesc_is_set_status(sc, status,
   8415 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8416 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8417 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8418 			if (wm_rxdesc_is_set_error(sc, errors,
   8419 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8420 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8421 		}
   8422 		if (wm_rxdesc_is_set_status(sc, status,
   8423 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8424 			/*
   8425 			 * Note: we don't know if this was TCP or UDP,
   8426 			 * so we just set both bits, and expect the
   8427 			 * upper layers to deal.
   8428 			 */
   8429 			WM_Q_EVCNT_INCR(rxq, tusum);
   8430 			m->m_pkthdr.csum_flags |=
   8431 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8432 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8433 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8434 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8435 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8436 		}
   8437 	}
   8438 }
   8439 
   8440 /*
   8441  * wm_rxeof:
   8442  *
   8443  *	Helper; handle receive interrupts.
   8444  */
   8445 static bool
   8446 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8447 {
   8448 	struct wm_softc *sc = rxq->rxq_sc;
   8449 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8450 	struct wm_rxsoft *rxs;
   8451 	struct mbuf *m;
   8452 	int i, len;
   8453 	int count = 0;
   8454 	uint32_t status, errors;
   8455 	uint16_t vlantag;
   8456 	bool more = false;
   8457 
   8458 	KASSERT(mutex_owned(rxq->rxq_lock));
   8459 
   8460 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8461 		if (limit-- == 0) {
   8462 			rxq->rxq_ptr = i;
   8463 			more = true;
   8464 			DPRINTF(WM_DEBUG_RX,
   8465 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8466 				device_xname(sc->sc_dev), i));
   8467 			break;
   8468 		}
   8469 
   8470 		rxs = &rxq->rxq_soft[i];
   8471 
   8472 		DPRINTF(WM_DEBUG_RX,
   8473 		    ("%s: RX: checking descriptor %d\n",
   8474 			device_xname(sc->sc_dev), i));
   8475 		wm_cdrxsync(rxq, i,
   8476 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8477 
   8478 		status = wm_rxdesc_get_status(rxq, i);
   8479 		errors = wm_rxdesc_get_errors(rxq, i);
   8480 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8481 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8482 #ifdef WM_DEBUG
   8483 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8484 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8485 #endif
   8486 
   8487 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while still holding
			 * rxq_lock, so it stays consistent with the
			 * counters.
			 */
   8492 			rxq->rxq_ptr = i;
   8493 			break;
   8494 		}
   8495 
   8496 		count++;
   8497 		if (__predict_false(rxq->rxq_discard)) {
   8498 			DPRINTF(WM_DEBUG_RX,
   8499 			    ("%s: RX: discarding contents of descriptor %d\n",
   8500 				device_xname(sc->sc_dev), i));
   8501 			wm_init_rxdesc(rxq, i);
   8502 			if (wm_rxdesc_is_eop(rxq, status)) {
   8503 				/* Reset our state. */
   8504 				DPRINTF(WM_DEBUG_RX,
   8505 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8506 					device_xname(sc->sc_dev)));
   8507 				rxq->rxq_discard = 0;
   8508 			}
   8509 			continue;
   8510 		}
   8511 
   8512 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8513 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8514 
   8515 		m = rxs->rxs_mbuf;
   8516 
   8517 		/*
   8518 		 * Add a new receive buffer to the ring, unless of
   8519 		 * course the length is zero. Treat the latter as a
   8520 		 * failed mapping.
   8521 		 */
   8522 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8523 			/*
   8524 			 * Failed, throw away what we've done so
   8525 			 * far, and discard the rest of the packet.
   8526 			 */
   8527 			ifp->if_ierrors++;
   8528 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8529 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8530 			wm_init_rxdesc(rxq, i);
   8531 			if (!wm_rxdesc_is_eop(rxq, status))
   8532 				rxq->rxq_discard = 1;
   8533 			if (rxq->rxq_head != NULL)
   8534 				m_freem(rxq->rxq_head);
   8535 			WM_RXCHAIN_RESET(rxq);
   8536 			DPRINTF(WM_DEBUG_RX,
   8537 			    ("%s: RX: Rx buffer allocation failed, "
   8538 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8539 				rxq->rxq_discard ? " (discard)" : ""));
   8540 			continue;
   8541 		}
   8542 
   8543 		m->m_len = len;
   8544 		rxq->rxq_len += len;
   8545 		DPRINTF(WM_DEBUG_RX,
   8546 		    ("%s: RX: buffer at %p len %d\n",
   8547 			device_xname(sc->sc_dev), m->m_data, len));
   8548 
   8549 		/* If this is not the end of the packet, keep looking. */
   8550 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8551 			WM_RXCHAIN_LINK(rxq, m);
   8552 			DPRINTF(WM_DEBUG_RX,
   8553 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8554 				device_xname(sc->sc_dev), rxq->rxq_len));
   8555 			continue;
   8556 		}
   8557 
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on I35[04] and
		 * I21[01] (not all chips can be configured to strip it),
		 * so we need to trim it.  We may also need to adjust the
		 * length of the previous mbuf in the chain if the current
		 * mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on I350, so we don't trim there.
		 */
   8568 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8569 		    && (sc->sc_type != WM_T_I210)
   8570 		    && (sc->sc_type != WM_T_I211)) {
   8571 			if (m->m_len < ETHER_CRC_LEN) {
   8572 				rxq->rxq_tail->m_len
   8573 				    -= (ETHER_CRC_LEN - m->m_len);
   8574 				m->m_len = 0;
   8575 			} else
   8576 				m->m_len -= ETHER_CRC_LEN;
   8577 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8578 		} else
   8579 			len = rxq->rxq_len;
   8580 
   8581 		WM_RXCHAIN_LINK(rxq, m);
   8582 
   8583 		*rxq->rxq_tailp = NULL;
   8584 		m = rxq->rxq_head;
   8585 
   8586 		WM_RXCHAIN_RESET(rxq);
   8587 
   8588 		DPRINTF(WM_DEBUG_RX,
   8589 		    ("%s: RX: have entire packet, len -> %d\n",
   8590 			device_xname(sc->sc_dev), len));
   8591 
   8592 		/* If an error occurred, update stats and drop the packet. */
   8593 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8594 			m_freem(m);
   8595 			continue;
   8596 		}
   8597 
   8598 		/* No errors.  Receive the packet. */
   8599 		m_set_rcvif(m, ifp);
   8600 		m->m_pkthdr.len = len;
		/*
		 * TODO: we should save the rsshash and rsstype in this
		 * mbuf.
		 */
   8605 		DPRINTF(WM_DEBUG_RX,
   8606 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8607 			device_xname(sc->sc_dev), rsstype, rsshash));
   8608 
   8609 		/*
   8610 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8611 		 * for us.  Associate the tag with the packet.
   8612 		 */
   8613 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8614 			continue;
   8615 
   8616 		/* Set up checksum info for this packet. */
   8617 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
		/*
		 * Update the receive pointer while still holding rxq_lock,
		 * so it stays consistent with the counters.
		 */
   8622 		rxq->rxq_ptr = i;
   8623 		rxq->rxq_packets++;
   8624 		rxq->rxq_bytes += len;
   8625 		mutex_exit(rxq->rxq_lock);
   8626 
   8627 		/* Pass it on. */
   8628 		if_percpuq_enqueue(sc->sc_ipq, m);
   8629 
   8630 		mutex_enter(rxq->rxq_lock);
   8631 
   8632 		if (rxq->rxq_stopping)
   8633 			break;
   8634 	}
   8635 
   8636 	if (count != 0)
   8637 		rnd_add_uint32(&sc->rnd_source, count);
   8638 
   8639 	DPRINTF(WM_DEBUG_RX,
   8640 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8641 
   8642 	return more;
   8643 }
   8644 
   8645 /*
   8646  * wm_linkintr_gmii:
   8647  *
   8648  *	Helper; handle link interrupts for GMII.
   8649  */
   8650 static void
   8651 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8652 {
   8653 
   8654 	KASSERT(WM_CORE_LOCKED(sc));
   8655 
   8656 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8657 		__func__));
   8658 
   8659 	if (icr & ICR_LSC) {
   8660 		uint32_t reg;
   8661 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8662 
   8663 		if ((status & STATUS_LU) != 0) {
   8664 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8665 				device_xname(sc->sc_dev),
   8666 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8667 		} else {
   8668 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8669 				device_xname(sc->sc_dev)));
   8670 		}
   8671 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8672 			wm_gig_downshift_workaround_ich8lan(sc);
   8673 
   8674 		if ((sc->sc_type == WM_T_ICH8)
   8675 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8676 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8677 		}
   8678 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8679 			device_xname(sc->sc_dev)));
   8680 		mii_pollstat(&sc->sc_mii);
   8681 		if (sc->sc_type == WM_T_82543) {
   8682 			int miistatus, active;
   8683 
   8684 			/*
   8685 			 * With 82543, we need to force speed and
   8686 			 * duplex on the MAC equal to what the PHY
   8687 			 * speed and duplex configuration is.
   8688 			 */
   8689 			miistatus = sc->sc_mii.mii_media_status;
   8690 
   8691 			if (miistatus & IFM_ACTIVE) {
   8692 				active = sc->sc_mii.mii_media_active;
   8693 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8694 				switch (IFM_SUBTYPE(active)) {
   8695 				case IFM_10_T:
   8696 					sc->sc_ctrl |= CTRL_SPEED_10;
   8697 					break;
   8698 				case IFM_100_TX:
   8699 					sc->sc_ctrl |= CTRL_SPEED_100;
   8700 					break;
   8701 				case IFM_1000_T:
   8702 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8703 					break;
   8704 				default:
   8705 					/*
					 * Fiber?
					 * Should not enter here.
   8708 					 */
   8709 					printf("unknown media (%x)\n", active);
   8710 					break;
   8711 				}
   8712 				if (active & IFM_FDX)
   8713 					sc->sc_ctrl |= CTRL_FD;
   8714 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8715 			}
   8716 		} else if (sc->sc_type == WM_T_PCH) {
   8717 			wm_k1_gig_workaround_hv(sc,
   8718 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8719 		}
   8720 
   8721 		if ((sc->sc_phytype == WMPHY_82578)
   8722 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8723 			== IFM_1000_T)) {
   8724 
   8725 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8726 				delay(200*1000); /* XXX too big */
   8727 
   8728 				/* Link stall fix for link up */
   8729 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8730 				    HV_MUX_DATA_CTRL,
   8731 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8732 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8733 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8734 				    HV_MUX_DATA_CTRL,
   8735 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8736 			}
   8737 		}
   8738 		/*
		 * I217 packet loss issue:
		 * ensure that the FEXTNVM4 beacon duration is set correctly
		 * on power-up.
		 * Set the beacon duration for I217 to 8 usec.
   8743 		 */
   8744 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8745 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8746 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8747 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8748 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8749 		}
   8750 
   8751 		/* XXX Work-around I218 hang issue */
   8752 		/* e1000_k1_workaround_lpt_lp() */
   8753 
   8754 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8755 			/*
   8756 			 * Set platform power management values for Latency
   8757 			 * Tolerance Reporting (LTR)
   8758 			 */
   8759 			wm_platform_pm_pch_lpt(sc,
   8760 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8761 		}
   8762 
   8763 		/* FEXTNVM6 K1-off workaround */
   8764 		if (sc->sc_type == WM_T_PCH_SPT) {
   8765 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8766 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8767 			    & FEXTNVM6_K1_OFF_ENABLE)
   8768 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8769 			else
   8770 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8771 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8772 		}
   8773 	} else if (icr & ICR_RXSEQ) {
   8774 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8775 			device_xname(sc->sc_dev)));
   8776 	}
   8777 }
   8778 
   8779 /*
   8780  * wm_linkintr_tbi:
   8781  *
   8782  *	Helper; handle link interrupts for TBI mode.
   8783  */
   8784 static void
   8785 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8786 {
   8787 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8788 	uint32_t status;
   8789 
   8790 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8791 		__func__));
   8792 
   8793 	status = CSR_READ(sc, WMREG_STATUS);
   8794 	if (icr & ICR_LSC) {
   8795 		wm_check_for_link(sc);
   8796 		if (status & STATUS_LU) {
   8797 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8798 				device_xname(sc->sc_dev),
   8799 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8800 			/*
   8801 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8802 			 * so we should update sc->sc_ctrl
   8803 			 */
   8804 
   8805 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8806 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8807 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8808 			if (status & STATUS_FD)
   8809 				sc->sc_tctl |=
   8810 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8811 			else
   8812 				sc->sc_tctl |=
   8813 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8814 			if (sc->sc_ctrl & CTRL_TFCE)
   8815 				sc->sc_fcrtl |= FCRTL_XONE;
   8816 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8817 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8818 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   8819 			sc->sc_tbi_linkup = 1;
   8820 			if_link_state_change(ifp, LINK_STATE_UP);
   8821 		} else {
   8822 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8823 				device_xname(sc->sc_dev)));
   8824 			sc->sc_tbi_linkup = 0;
   8825 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8826 		}
   8827 		/* Update LED */
   8828 		wm_tbi_serdes_set_linkled(sc);
   8829 	} else if (icr & ICR_RXSEQ) {
   8830 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8831 			device_xname(sc->sc_dev)));
   8832 	}
   8833 }
   8834 
   8835 /*
   8836  * wm_linkintr_serdes:
   8837  *
 *	Helper; handle link interrupts for SERDES mode.
   8839  */
   8840 static void
   8841 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8842 {
   8843 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8844 	struct mii_data *mii = &sc->sc_mii;
   8845 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8846 	uint32_t pcs_adv, pcs_lpab, reg;
   8847 
   8848 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8849 		__func__));
   8850 
   8851 	if (icr & ICR_LSC) {
   8852 		/* Check PCS */
   8853 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8854 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8855 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8856 				device_xname(sc->sc_dev)));
   8857 			mii->mii_media_status |= IFM_ACTIVE;
   8858 			sc->sc_tbi_linkup = 1;
   8859 			if_link_state_change(ifp, LINK_STATE_UP);
   8860 		} else {
   8861 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8862 				device_xname(sc->sc_dev)));
   8863 			mii->mii_media_status |= IFM_NONE;
   8864 			sc->sc_tbi_linkup = 0;
   8865 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8866 			wm_tbi_serdes_set_linkled(sc);
   8867 			return;
   8868 		}
   8869 		mii->mii_media_active |= IFM_1000_SX;
   8870 		if ((reg & PCS_LSTS_FDX) != 0)
   8871 			mii->mii_media_active |= IFM_FDX;
   8872 		else
   8873 			mii->mii_media_active |= IFM_HDX;
   8874 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8875 			/* Check flow */
   8876 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8877 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8878 				DPRINTF(WM_DEBUG_LINK,
   8879 				    ("XXX LINKOK but not ACOMP\n"));
   8880 				return;
   8881 			}
   8882 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8883 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8884 			DPRINTF(WM_DEBUG_LINK,
   8885 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
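			/*
			 * Resolve TX/RX pause from the advertised
			 * symmetric/asymmetric pause bits, as in the
			 * IEEE 802.3 annex 28B priority resolution.
			 */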
   8886 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8887 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8888 				mii->mii_media_active |= IFM_FLOW
   8889 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8890 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8891 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8892 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8893 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8894 				mii->mii_media_active |= IFM_FLOW
   8895 				    | IFM_ETH_TXPAUSE;
   8896 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8897 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8898 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8899 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8900 				mii->mii_media_active |= IFM_FLOW
   8901 				    | IFM_ETH_RXPAUSE;
   8902 		}
   8903 		/* Update LED */
   8904 		wm_tbi_serdes_set_linkled(sc);
   8905 	} else {
   8906 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8907 		    device_xname(sc->sc_dev)));
   8908 	}
   8909 }
   8910 
   8911 /*
   8912  * wm_linkintr:
   8913  *
   8914  *	Helper; handle link interrupts.
   8915  */
   8916 static void
   8917 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8918 {
   8919 
   8920 	KASSERT(WM_CORE_LOCKED(sc));
   8921 
   8922 	if (sc->sc_flags & WM_F_HAS_MII)
   8923 		wm_linkintr_gmii(sc, icr);
   8924 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8925 	    && (sc->sc_type >= WM_T_82575))
   8926 		wm_linkintr_serdes(sc, icr);
   8927 	else
   8928 		wm_linkintr_tbi(sc, icr);
   8929 }
   8930 
   8931 /*
   8932  * wm_intr_legacy:
   8933  *
   8934  *	Interrupt service routine for INTx and MSI.
   8935  */
   8936 static int
   8937 wm_intr_legacy(void *arg)
   8938 {
   8939 	struct wm_softc *sc = arg;
   8940 	struct wm_queue *wmq = &sc->sc_queue[0];
   8941 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8942 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8943 	uint32_t icr, rndval = 0;
   8944 	int handled = 0;
   8945 
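	/*
	 * Each read of ICR acknowledges the causes it returns, so keep
	 * looping until no interrupt cause we care about remains set.
	 */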
   8946 	while (1 /* CONSTCOND */) {
   8947 		icr = CSR_READ(sc, WMREG_ICR);
   8948 		if ((icr & sc->sc_icr) == 0)
   8949 			break;
   8950 		if (handled == 0) {
   8951 			DPRINTF(WM_DEBUG_TX,
			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8953 		}
   8954 		if (rndval == 0)
   8955 			rndval = icr;
   8956 
   8957 		mutex_enter(rxq->rxq_lock);
   8958 
   8959 		if (rxq->rxq_stopping) {
   8960 			mutex_exit(rxq->rxq_lock);
   8961 			break;
   8962 		}
   8963 
   8964 		handled = 1;
   8965 
   8966 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8967 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8968 			DPRINTF(WM_DEBUG_RX,
   8969 			    ("%s: RX: got Rx intr 0x%08x\n",
   8970 				device_xname(sc->sc_dev),
   8971 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   8972 			WM_Q_EVCNT_INCR(rxq, intr);
   8973 		}
   8974 #endif
   8975 		/*
		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
		 * So we can call wm_rxeof() in interrupt context.
   8979 		 */
   8980 		wm_rxeof(rxq, UINT_MAX);
   8981 
   8982 		mutex_exit(rxq->rxq_lock);
   8983 		mutex_enter(txq->txq_lock);
   8984 
   8985 		if (txq->txq_stopping) {
   8986 			mutex_exit(txq->txq_lock);
   8987 			break;
   8988 		}
   8989 
   8990 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8991 		if (icr & ICR_TXDW) {
   8992 			DPRINTF(WM_DEBUG_TX,
   8993 			    ("%s: TX: got TXDW interrupt\n",
   8994 				device_xname(sc->sc_dev)));
   8995 			WM_Q_EVCNT_INCR(txq, txdw);
   8996 		}
   8997 #endif
   8998 		wm_txeof(txq, UINT_MAX);
   8999 
   9000 		mutex_exit(txq->txq_lock);
   9001 		WM_CORE_LOCK(sc);
   9002 
   9003 		if (sc->sc_core_stopping) {
   9004 			WM_CORE_UNLOCK(sc);
   9005 			break;
   9006 		}
   9007 
   9008 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9009 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9010 			wm_linkintr(sc, icr);
   9011 		}
   9012 
   9013 		WM_CORE_UNLOCK(sc);
   9014 
   9015 		if (icr & ICR_RXO) {
   9016 #if defined(WM_DEBUG)
   9017 			log(LOG_WARNING, "%s: Receive overrun\n",
   9018 			    device_xname(sc->sc_dev));
   9019 #endif /* defined(WM_DEBUG) */
   9020 		}
   9021 	}
   9022 
   9023 	rnd_add_uint32(&sc->rnd_source, rndval);
   9024 
   9025 	if (handled) {
   9026 		/* Try to get more packets going. */
   9027 		softint_schedule(wmq->wmq_si);
   9028 	}
   9029 
   9030 	return handled;
   9031 }
   9032 
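/*
 * Disable the TX/RX interrupts of a single queue.  The register and bit
 * layout differ by generation: 82574 uses per-queue ICR bits in IMC,
 * 82575 uses EITR_TX_QUEUE/EITR_RX_QUEUE bits in EIMC, and newer chips
 * use one EIMC bit per MSI-X vector.  wm_txrxintr_enable() is the
 * mirror image.
 */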
   9033 static inline void
   9034 wm_txrxintr_disable(struct wm_queue *wmq)
   9035 {
   9036 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9037 
   9038 	if (sc->sc_type == WM_T_82574)
   9039 		CSR_WRITE(sc, WMREG_IMC,
   9040 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9041 	else if (sc->sc_type == WM_T_82575)
   9042 		CSR_WRITE(sc, WMREG_EIMC,
   9043 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9044 	else
   9045 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9046 }
   9047 
   9048 static inline void
   9049 wm_txrxintr_enable(struct wm_queue *wmq)
   9050 {
   9051 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9052 
   9053 	wm_itrs_calculate(sc, wmq);
   9054 
   9055 	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
	 * here.  There is no need to care which of RXQ(0) and RXQ(1) enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
	 * its wm_handle_queue(wmq) is running.
   9060 	 */
   9061 	if (sc->sc_type == WM_T_82574)
   9062 		CSR_WRITE(sc, WMREG_IMS,
   9063 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9064 	else if (sc->sc_type == WM_T_82575)
   9065 		CSR_WRITE(sc, WMREG_EIMS,
   9066 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9067 	else
   9068 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9069 }
   9070 
   9071 static int
   9072 wm_txrxintr_msix(void *arg)
   9073 {
   9074 	struct wm_queue *wmq = arg;
   9075 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9076 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9077 	struct wm_softc *sc = txq->txq_sc;
   9078 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9079 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9080 	bool txmore;
   9081 	bool rxmore;
   9082 
   9083 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9084 
   9085 	DPRINTF(WM_DEBUG_TX,
   9086 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9087 
   9088 	wm_txrxintr_disable(wmq);
   9089 
   9090 	mutex_enter(txq->txq_lock);
   9091 
   9092 	if (txq->txq_stopping) {
   9093 		mutex_exit(txq->txq_lock);
   9094 		return 0;
   9095 	}
   9096 
   9097 	WM_Q_EVCNT_INCR(txq, txdw);
   9098 	txmore = wm_txeof(txq, txlimit);
	/* wm_deferred_start() is done in wm_handle_queue(). */
   9100 	mutex_exit(txq->txq_lock);
   9101 
   9102 	DPRINTF(WM_DEBUG_RX,
   9103 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9104 	mutex_enter(rxq->rxq_lock);
   9105 
   9106 	if (rxq->rxq_stopping) {
   9107 		mutex_exit(rxq->rxq_lock);
   9108 		return 0;
   9109 	}
   9110 
   9111 	WM_Q_EVCNT_INCR(rxq, intr);
   9112 	rxmore = wm_rxeof(rxq, rxlimit);
   9113 	mutex_exit(rxq->rxq_lock);
   9114 
   9115 	wm_itrs_writereg(sc, wmq);
   9116 
   9117 	if (txmore || rxmore)
   9118 		softint_schedule(wmq->wmq_si);
   9119 	else
   9120 		wm_txrxintr_enable(wmq);
   9121 
   9122 	return 1;
   9123 }
   9124 
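/*
 * wm_handle_queue:
 *
 *	Softint handler for a TX/RX queue pair.  Continues the processing
 *	that wm_txrxintr_msix() deferred when it hit its interrupt-time
 *	limits, using the (typically larger) process limits, and then
 *	either reschedules itself or re-enables the queue interrupts.
 */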
   9125 static void
   9126 wm_handle_queue(void *arg)
   9127 {
   9128 	struct wm_queue *wmq = arg;
   9129 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9130 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9131 	struct wm_softc *sc = txq->txq_sc;
   9132 	u_int txlimit = sc->sc_tx_process_limit;
   9133 	u_int rxlimit = sc->sc_rx_process_limit;
   9134 	bool txmore;
   9135 	bool rxmore;
   9136 
   9137 	mutex_enter(txq->txq_lock);
   9138 	if (txq->txq_stopping) {
   9139 		mutex_exit(txq->txq_lock);
   9140 		return;
   9141 	}
   9142 	txmore = wm_txeof(txq, txlimit);
   9143 	wm_deferred_start_locked(txq);
   9144 	mutex_exit(txq->txq_lock);
   9145 
   9146 	mutex_enter(rxq->rxq_lock);
   9147 	if (rxq->rxq_stopping) {
   9148 		mutex_exit(rxq->rxq_lock);
   9149 		return;
   9150 	}
   9151 	WM_Q_EVCNT_INCR(rxq, defer);
   9152 	rxmore = wm_rxeof(rxq, rxlimit);
   9153 	mutex_exit(rxq->rxq_lock);
   9154 
   9155 	if (txmore || rxmore)
   9156 		softint_schedule(wmq->wmq_si);
   9157 	else
   9158 		wm_txrxintr_enable(wmq);
   9159 }
   9160 
   9161 /*
   9162  * wm_linkintr_msix:
   9163  *
   9164  *	Interrupt service routine for link status change for MSI-X.
   9165  */
   9166 static int
   9167 wm_linkintr_msix(void *arg)
   9168 {
   9169 	struct wm_softc *sc = arg;
   9170 	uint32_t reg;
	bool has_rxo = false;
   9172 
   9173 	DPRINTF(WM_DEBUG_LINK,
   9174 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9175 
   9176 	reg = CSR_READ(sc, WMREG_ICR);
   9177 	WM_CORE_LOCK(sc);
   9178 	if (sc->sc_core_stopping)
   9179 		goto out;
   9180 
   9181 	if ((reg & ICR_LSC) != 0) {
   9182 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9183 		wm_linkintr(sc, ICR_LSC);
   9184 	}
   9185 
   9186 	/*
   9187 	 * XXX 82574 MSI-X mode workaround
   9188 	 *
	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
	 * MSI-X vector and, furthermore, raises neither the ICR_RXQ(0) nor
	 * the ICR_RXQ(1) vector.  So we generate ICR_RXQ(0) and ICR_RXQ(1)
	 * interrupts by writing WMREG_ICS to process the received packets.
   9193 	 */
   9194 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9195 #if defined(WM_DEBUG)
   9196 		log(LOG_WARNING, "%s: Receive overrun\n",
   9197 		    device_xname(sc->sc_dev));
   9198 #endif /* defined(WM_DEBUG) */
   9199 
   9200 		has_rxo = true;
   9201 		/*
		 * The RXO interrupt rate is very high while receive traffic
		 * is heavy, so we use polling mode for ICR_OTHER just as we
		 * do for the Tx/Rx interrupts.  ICR_OTHER is re-enabled at
		 * the end of wm_txrxintr_msix(), which is kicked by both the
		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9207 		 */
   9208 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9209 
   9210 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9211 	}
   9212 
   9215 out:
   9216 	WM_CORE_UNLOCK(sc);
   9217 
   9218 	if (sc->sc_type == WM_T_82574) {
   9219 		if (!has_rxo)
   9220 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9221 		else
   9222 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9223 	} else if (sc->sc_type == WM_T_82575)
   9224 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9225 	else
   9226 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9227 
   9228 	return 1;
   9229 }
   9230 
   9231 /*
   9232  * Media related.
   9233  * GMII, SGMII, TBI (and SERDES)
   9234  */
   9235 
   9236 /* Common */
   9237 
   9238 /*
   9239  * wm_tbi_serdes_set_linkled:
   9240  *
   9241  *	Update the link LED on TBI and SERDES devices.
   9242  */
   9243 static void
   9244 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9245 {
   9246 
   9247 	if (sc->sc_tbi_linkup)
   9248 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9249 	else
   9250 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9251 
   9252 	/* 82540 or newer devices are active low */
   9253 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9254 
   9255 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9256 }
   9257 
   9258 /* GMII related */
   9259 
   9260 /*
   9261  * wm_gmii_reset:
   9262  *
   9263  *	Reset the PHY.
   9264  */
   9265 static void
   9266 wm_gmii_reset(struct wm_softc *sc)
   9267 {
   9268 	uint32_t reg;
   9269 	int rv;
   9270 
   9271 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9272 		device_xname(sc->sc_dev), __func__));
   9273 
   9274 	rv = sc->phy.acquire(sc);
   9275 	if (rv != 0) {
   9276 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9277 		    __func__);
   9278 		return;
   9279 	}
   9280 
   9281 	switch (sc->sc_type) {
   9282 	case WM_T_82542_2_0:
   9283 	case WM_T_82542_2_1:
   9284 		/* null */
   9285 		break;
   9286 	case WM_T_82543:
   9287 		/*
   9288 		 * With 82543, we need to force speed and duplex on the MAC
   9289 		 * equal to what the PHY speed and duplex configuration is.
   9290 		 * In addition, we need to perform a hardware reset on the PHY
   9291 		 * to take it out of reset.
   9292 		 */
   9293 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9294 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9295 
   9296 		/* The PHY reset pin is active-low. */
   9297 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9298 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9299 		    CTRL_EXT_SWDPIN(4));
   9300 		reg |= CTRL_EXT_SWDPIO(4);
   9301 
   9302 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9303 		CSR_WRITE_FLUSH(sc);
   9304 		delay(10*1000);
   9305 
   9306 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9307 		CSR_WRITE_FLUSH(sc);
   9308 		delay(150);
   9309 #if 0
   9310 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9311 #endif
   9312 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9313 		break;
   9314 	case WM_T_82544:	/* reset 10000us */
   9315 	case WM_T_82540:
   9316 	case WM_T_82545:
   9317 	case WM_T_82545_3:
   9318 	case WM_T_82546:
   9319 	case WM_T_82546_3:
   9320 	case WM_T_82541:
   9321 	case WM_T_82541_2:
   9322 	case WM_T_82547:
   9323 	case WM_T_82547_2:
   9324 	case WM_T_82571:	/* reset 100us */
   9325 	case WM_T_82572:
   9326 	case WM_T_82573:
   9327 	case WM_T_82574:
   9328 	case WM_T_82575:
   9329 	case WM_T_82576:
   9330 	case WM_T_82580:
   9331 	case WM_T_I350:
   9332 	case WM_T_I354:
   9333 	case WM_T_I210:
   9334 	case WM_T_I211:
   9335 	case WM_T_82583:
   9336 	case WM_T_80003:
   9337 		/* generic reset */
   9338 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9339 		CSR_WRITE_FLUSH(sc);
   9340 		delay(20000);
   9341 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9342 		CSR_WRITE_FLUSH(sc);
   9343 		delay(20000);
   9344 
   9345 		if ((sc->sc_type == WM_T_82541)
   9346 		    || (sc->sc_type == WM_T_82541_2)
   9347 		    || (sc->sc_type == WM_T_82547)
   9348 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset(). */
   9350 			/* XXX add code to set LED after phy reset */
   9351 		}
   9352 		break;
   9353 	case WM_T_ICH8:
   9354 	case WM_T_ICH9:
   9355 	case WM_T_ICH10:
   9356 	case WM_T_PCH:
   9357 	case WM_T_PCH2:
   9358 	case WM_T_PCH_LPT:
   9359 	case WM_T_PCH_SPT:
   9360 	case WM_T_PCH_CNP:
   9361 		/* generic reset */
   9362 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9363 		CSR_WRITE_FLUSH(sc);
   9364 		delay(100);
   9365 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9366 		CSR_WRITE_FLUSH(sc);
   9367 		delay(150);
   9368 		break;
   9369 	default:
   9370 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9371 		    __func__);
   9372 		break;
   9373 	}
   9374 
   9375 	sc->phy.release(sc);
   9376 
   9377 	/* get_cfg_done */
   9378 	wm_get_cfg_done(sc);
   9379 
   9380 	/* extra setup */
   9381 	switch (sc->sc_type) {
   9382 	case WM_T_82542_2_0:
   9383 	case WM_T_82542_2_1:
   9384 	case WM_T_82543:
   9385 	case WM_T_82544:
   9386 	case WM_T_82540:
   9387 	case WM_T_82545:
   9388 	case WM_T_82545_3:
   9389 	case WM_T_82546:
   9390 	case WM_T_82546_3:
   9391 	case WM_T_82541_2:
   9392 	case WM_T_82547_2:
   9393 	case WM_T_82571:
   9394 	case WM_T_82572:
   9395 	case WM_T_82573:
   9396 	case WM_T_82574:
   9397 	case WM_T_82583:
   9398 	case WM_T_82575:
   9399 	case WM_T_82576:
   9400 	case WM_T_82580:
   9401 	case WM_T_I350:
   9402 	case WM_T_I354:
   9403 	case WM_T_I210:
   9404 	case WM_T_I211:
   9405 	case WM_T_80003:
   9406 		/* null */
   9407 		break;
   9408 	case WM_T_82541:
   9409 	case WM_T_82547:
		/* XXX Configure the activity LED after PHY reset */
   9411 		break;
   9412 	case WM_T_ICH8:
   9413 	case WM_T_ICH9:
   9414 	case WM_T_ICH10:
   9415 	case WM_T_PCH:
   9416 	case WM_T_PCH2:
   9417 	case WM_T_PCH_LPT:
   9418 	case WM_T_PCH_SPT:
   9419 	case WM_T_PCH_CNP:
   9420 		wm_phy_post_reset(sc);
   9421 		break;
   9422 	default:
   9423 		panic("%s: unknown type\n", __func__);
   9424 		break;
   9425 	}
   9426 }
   9427 
   9428 /*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected.  Selecting them requires the PCI ID or MAC type, since no
 * PHY registers can be accessed yet.
 *
 *  On the first call of this function, the PHY ID is not known yet.
 * Check the PCI ID or MAC type.  The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type.  This might still not be perfect because of missing
 * comparison entries, but it is better than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
   9445  */
   9446 static void
   9447 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9448     uint16_t phy_model)
   9449 {
   9450 	device_t dev = sc->sc_dev;
   9451 	struct mii_data *mii = &sc->sc_mii;
   9452 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9453 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9454 	mii_readreg_t new_readreg;
   9455 	mii_writereg_t new_writereg;
   9456 
   9457 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9458 		device_xname(sc->sc_dev), __func__));
   9459 
   9460 	if (mii->mii_readreg == NULL) {
   9461 		/*
   9462 		 *  This is the first call of this function. For ICH and PCH
   9463 		 * variants, it's difficult to determine the PHY access method
   9464 		 * by sc_type, so use the PCI product ID for some devices.
   9465 		 */
   9466 
   9467 		switch (sc->sc_pcidevid) {
   9468 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9469 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9470 			/* 82577 */
   9471 			new_phytype = WMPHY_82577;
   9472 			break;
   9473 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9474 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9475 			/* 82578 */
   9476 			new_phytype = WMPHY_82578;
   9477 			break;
   9478 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9479 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9480 			/* 82579 */
   9481 			new_phytype = WMPHY_82579;
   9482 			break;
   9483 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9484 		case PCI_PRODUCT_INTEL_82801I_BM:
   9485 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9486 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9487 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9488 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9489 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9490 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9491 			/* ICH8, 9, 10 with 82567 */
   9492 			new_phytype = WMPHY_BM;
   9493 			break;
   9494 		default:
   9495 			break;
   9496 		}
   9497 	} else {
   9498 		/* It's not the first call. Use PHY OUI and model */
   9499 		switch (phy_oui) {
   9500 		case MII_OUI_ATHEROS: /* XXX ??? */
   9501 			switch (phy_model) {
   9502 			case 0x0004: /* XXX */
   9503 				new_phytype = WMPHY_82578;
   9504 				break;
   9505 			default:
   9506 				break;
   9507 			}
   9508 			break;
   9509 		case MII_OUI_xxMARVELL:
   9510 			switch (phy_model) {
   9511 			case MII_MODEL_xxMARVELL_I210:
   9512 				new_phytype = WMPHY_I210;
   9513 				break;
   9514 			case MII_MODEL_xxMARVELL_E1011:
   9515 			case MII_MODEL_xxMARVELL_E1000_3:
   9516 			case MII_MODEL_xxMARVELL_E1000_5:
   9517 			case MII_MODEL_xxMARVELL_E1112:
   9518 				new_phytype = WMPHY_M88;
   9519 				break;
   9520 			case MII_MODEL_xxMARVELL_E1149:
   9521 				new_phytype = WMPHY_BM;
   9522 				break;
   9523 			case MII_MODEL_xxMARVELL_E1111:
   9524 			case MII_MODEL_xxMARVELL_I347:
   9525 			case MII_MODEL_xxMARVELL_E1512:
   9526 			case MII_MODEL_xxMARVELL_E1340M:
   9527 			case MII_MODEL_xxMARVELL_E1543:
   9528 				new_phytype = WMPHY_M88;
   9529 				break;
   9530 			case MII_MODEL_xxMARVELL_I82563:
   9531 				new_phytype = WMPHY_GG82563;
   9532 				break;
   9533 			default:
   9534 				break;
   9535 			}
   9536 			break;
   9537 		case MII_OUI_INTEL:
   9538 			switch (phy_model) {
   9539 			case MII_MODEL_INTEL_I82577:
   9540 				new_phytype = WMPHY_82577;
   9541 				break;
   9542 			case MII_MODEL_INTEL_I82579:
   9543 				new_phytype = WMPHY_82579;
   9544 				break;
   9545 			case MII_MODEL_INTEL_I217:
   9546 				new_phytype = WMPHY_I217;
   9547 				break;
   9548 			case MII_MODEL_INTEL_I82580:
   9549 			case MII_MODEL_INTEL_I350:
   9550 				new_phytype = WMPHY_82580;
   9551 				break;
   9552 			default:
   9553 				break;
   9554 			}
   9555 			break;
   9556 		case MII_OUI_yyINTEL:
   9557 			switch (phy_model) {
   9558 			case MII_MODEL_yyINTEL_I82562G:
   9559 			case MII_MODEL_yyINTEL_I82562EM:
   9560 			case MII_MODEL_yyINTEL_I82562ET:
   9561 				new_phytype = WMPHY_IFE;
   9562 				break;
   9563 			case MII_MODEL_yyINTEL_IGP01E1000:
   9564 				new_phytype = WMPHY_IGP;
   9565 				break;
   9566 			case MII_MODEL_yyINTEL_I82566:
   9567 				new_phytype = WMPHY_IGP_3;
   9568 				break;
   9569 			default:
   9570 				break;
   9571 			}
   9572 			break;
   9573 		default:
   9574 			break;
   9575 		}
   9576 		if (new_phytype == WMPHY_UNKNOWN)
   9577 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9578 			    __func__);
   9579 
   9580 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9581 		    && (sc->sc_phytype != new_phytype)) {
    9582 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9583 			    "was incorrect. PHY type from PHY ID = %u\n",
   9584 			    sc->sc_phytype, new_phytype);
   9585 		}
   9586 	}
   9587 
   9588 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9589 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9590 		/* SGMII */
   9591 		new_readreg = wm_sgmii_readreg;
   9592 		new_writereg = wm_sgmii_writereg;
   9593 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9594 		/* BM2 (phyaddr == 1) */
   9595 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9596 		    && (new_phytype != WMPHY_BM)
   9597 		    && (new_phytype != WMPHY_UNKNOWN))
   9598 			doubt_phytype = new_phytype;
   9599 		new_phytype = WMPHY_BM;
   9600 		new_readreg = wm_gmii_bm_readreg;
   9601 		new_writereg = wm_gmii_bm_writereg;
   9602 	} else if (sc->sc_type >= WM_T_PCH) {
   9603 		/* All PCH* use _hv_ */
   9604 		new_readreg = wm_gmii_hv_readreg;
   9605 		new_writereg = wm_gmii_hv_writereg;
   9606 	} else if (sc->sc_type >= WM_T_ICH8) {
   9607 		/* non-82567 ICH8, 9 and 10 */
   9608 		new_readreg = wm_gmii_i82544_readreg;
   9609 		new_writereg = wm_gmii_i82544_writereg;
   9610 	} else if (sc->sc_type >= WM_T_80003) {
   9611 		/* 80003 */
   9612 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9613 		    && (new_phytype != WMPHY_GG82563)
   9614 		    && (new_phytype != WMPHY_UNKNOWN))
   9615 			doubt_phytype = new_phytype;
   9616 		new_phytype = WMPHY_GG82563;
   9617 		new_readreg = wm_gmii_i80003_readreg;
   9618 		new_writereg = wm_gmii_i80003_writereg;
   9619 	} else if (sc->sc_type >= WM_T_I210) {
   9620 		/* I210 and I211 */
   9621 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9622 		    && (new_phytype != WMPHY_I210)
   9623 		    && (new_phytype != WMPHY_UNKNOWN))
   9624 			doubt_phytype = new_phytype;
   9625 		new_phytype = WMPHY_I210;
   9626 		new_readreg = wm_gmii_gs40g_readreg;
   9627 		new_writereg = wm_gmii_gs40g_writereg;
   9628 	} else if (sc->sc_type >= WM_T_82580) {
   9629 		/* 82580, I350 and I354 */
   9630 		new_readreg = wm_gmii_82580_readreg;
   9631 		new_writereg = wm_gmii_82580_writereg;
   9632 	} else if (sc->sc_type >= WM_T_82544) {
    9633 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9634 		new_readreg = wm_gmii_i82544_readreg;
   9635 		new_writereg = wm_gmii_i82544_writereg;
   9636 	} else {
   9637 		new_readreg = wm_gmii_i82543_readreg;
   9638 		new_writereg = wm_gmii_i82543_writereg;
   9639 	}
   9640 
   9641 	if (new_phytype == WMPHY_BM) {
   9642 		/* All BM use _bm_ */
   9643 		new_readreg = wm_gmii_bm_readreg;
   9644 		new_writereg = wm_gmii_bm_writereg;
   9645 	}
   9646 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9647 		/* All PCH* use _hv_ */
   9648 		new_readreg = wm_gmii_hv_readreg;
   9649 		new_writereg = wm_gmii_hv_writereg;
   9650 	}
   9651 
   9652 	/* Diag output */
   9653 	if (doubt_phytype != WMPHY_UNKNOWN)
   9654 		aprint_error_dev(dev, "Assumed new PHY type was "
   9655 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9656 		    new_phytype);
   9657 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9658 	    && (sc->sc_phytype != new_phytype))
    9659 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
   9660 		    "was incorrect. New PHY type = %u\n",
   9661 		    sc->sc_phytype, new_phytype);
   9662 
   9663 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9664 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9665 
   9666 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9667 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9668 		    "function was incorrect.\n");
   9669 
   9670 	/* Update now */
   9671 	sc->sc_phytype = new_phytype;
   9672 	mii->mii_readreg = new_readreg;
   9673 	mii->mii_writereg = new_writereg;
   9674 }
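
/*
 * Illustrative sketch (compiled out): wm_gmii_setup_phytype() is meant
 * to be called twice.  The first call happens before any PHY has been
 * probed, when mii->mii_readreg is still NULL and only the PCI product
 * ID is available; the second happens after mii_attach() has found a
 * PHY, with the OUI and model taken from the PHY ID registers (see the
 * call in wm_gmii_mediainit() below).  The arguments are illustrative;
 * child stands for the mii_softc discovered by the probe.
 */
#if 0
	wm_gmii_setup_phytype(sc, 0, 0);	/* 1st: guess from device ID */
	/* ... mii_attach() probes the bus ... */
	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,	/* 2nd: use PHY ID */
	    child->mii_mpd_model);
#endif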
   9675 
   9676 /*
   9677  * wm_get_phy_id_82575:
   9678  *
    9679  * Return the PHY ID, or -1 on failure.
   9680  */
   9681 static int
   9682 wm_get_phy_id_82575(struct wm_softc *sc)
   9683 {
   9684 	uint32_t reg;
   9685 	int phyid = -1;
   9686 
   9687 	/* XXX */
   9688 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9689 		return -1;
   9690 
   9691 	if (wm_sgmii_uses_mdio(sc)) {
   9692 		switch (sc->sc_type) {
   9693 		case WM_T_82575:
   9694 		case WM_T_82576:
   9695 			reg = CSR_READ(sc, WMREG_MDIC);
   9696 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9697 			break;
   9698 		case WM_T_82580:
   9699 		case WM_T_I350:
   9700 		case WM_T_I354:
   9701 		case WM_T_I210:
   9702 		case WM_T_I211:
   9703 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9704 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9705 			break;
   9706 		default:
   9707 			return -1;
   9708 		}
   9709 	}
   9710 
   9711 	return phyid;
   9712 }
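
/*
 * Usage sketch (compiled out): this mirrors how wm_gmii_mediainit()
 * below consumes the result, attaching a single PHY at the address the
 * EEPROM programmed into MDIC/MDICNFG.
 */
#if 0
	int id = wm_get_phy_id_82575(sc);

	if (id != -1)
		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, id,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
#endif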
   9713 
   9714 
   9715 /*
   9716  * wm_gmii_mediainit:
   9717  *
   9718  *	Initialize media for use on 1000BASE-T devices.
   9719  */
   9720 static void
   9721 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9722 {
   9723 	device_t dev = sc->sc_dev;
   9724 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9725 	struct mii_data *mii = &sc->sc_mii;
   9726 	uint32_t reg;
   9727 
   9728 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9729 		device_xname(sc->sc_dev), __func__));
   9730 
   9731 	/* We have GMII. */
   9732 	sc->sc_flags |= WM_F_HAS_MII;
   9733 
   9734 	if (sc->sc_type == WM_T_80003)
    9735 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9736 	else
   9737 		sc->sc_tipg = TIPG_1000T_DFLT;
   9738 
   9739 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9740 	if ((sc->sc_type == WM_T_82580)
   9741 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9742 	    || (sc->sc_type == WM_T_I211)) {
   9743 		reg = CSR_READ(sc, WMREG_PHPM);
   9744 		reg &= ~PHPM_GO_LINK_D;
   9745 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9746 	}
   9747 
   9748 	/*
   9749 	 * Let the chip set speed/duplex on its own based on
   9750 	 * signals from the PHY.
   9751 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9752 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9753 	 */
   9754 	sc->sc_ctrl |= CTRL_SLU;
   9755 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9756 
   9757 	/* Initialize our media structures and probe the GMII. */
   9758 	mii->mii_ifp = ifp;
   9759 
   9760 	mii->mii_statchg = wm_gmii_statchg;
   9761 
   9762 	/* get PHY control from SMBus to PCIe */
   9763 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9764 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9765 	    || (sc->sc_type == WM_T_PCH_CNP))
   9766 		wm_smbustopci(sc);
   9767 
   9768 	wm_gmii_reset(sc);
   9769 
   9770 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9771 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9772 	    wm_gmii_mediastatus);
   9773 
   9774 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9775 	    || (sc->sc_type == WM_T_82580)
   9776 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9777 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9778 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9779 			/* Attach only one port */
   9780 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9781 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9782 		} else {
   9783 			int i, id;
   9784 			uint32_t ctrl_ext;
   9785 
   9786 			id = wm_get_phy_id_82575(sc);
   9787 			if (id != -1) {
   9788 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9789 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9790 			}
   9791 			if ((id == -1)
   9792 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9793 				/* Power on the SGMII PHY if it is disabled. */
   9794 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9795 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    9796 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9797 				CSR_WRITE_FLUSH(sc);
   9798 				delay(300*1000); /* XXX too long */
   9799 
    9800 				/* Try PHY addresses 1 through 7 */
   9801 				for (i = 1; i < 8; i++)
   9802 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9803 					    0xffffffff, i, MII_OFFSET_ANY,
   9804 					    MIIF_DOPAUSE);
   9805 
    9806 				/* Restore the previous SFP cage power state */
   9807 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9808 			}
   9809 		}
   9810 	} else {
   9811 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9812 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9813 	}
   9814 
   9815 	/*
    9816 	 * If the MAC is PCH2 or newer and failed to detect a MII PHY, call
    9817 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   9818 	 */
   9819 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9820 		|| (sc->sc_type == WM_T_PCH_SPT)
   9821 		|| (sc->sc_type == WM_T_PCH_CNP))
   9822 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9823 		wm_set_mdio_slow_mode_hv(sc);
   9824 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9825 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9826 	}
   9827 
   9828 	/*
   9829 	 * (For ICH8 variants)
   9830 	 * If PHY detection failed, use BM's r/w function and retry.
   9831 	 */
   9832 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9833 		/* if failed, retry with *_bm_* */
   9834 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9835 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9836 		    sc->sc_phytype);
   9837 		sc->sc_phytype = WMPHY_BM;
   9838 		mii->mii_readreg = wm_gmii_bm_readreg;
   9839 		mii->mii_writereg = wm_gmii_bm_writereg;
   9840 
   9841 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9842 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9843 	}
   9844 
   9845 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9846 		/* No PHY was found */
   9847 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9848 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9849 		sc->sc_phytype = WMPHY_NONE;
   9850 	} else {
   9851 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9852 
   9853 		/*
    9854 		 * A PHY was found. Check the PHY type again with a second
    9855 		 * call to wm_gmii_setup_phytype().
   9856 		 */
   9857 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9858 		    child->mii_mpd_model);
   9859 
   9860 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9861 	}
   9862 }
   9863 
   9864 /*
   9865  * wm_gmii_mediachange:	[ifmedia interface function]
   9866  *
   9867  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9868  */
   9869 static int
   9870 wm_gmii_mediachange(struct ifnet *ifp)
   9871 {
   9872 	struct wm_softc *sc = ifp->if_softc;
   9873 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9874 	int rc;
   9875 
   9876 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9877 		device_xname(sc->sc_dev), __func__));
   9878 	if ((ifp->if_flags & IFF_UP) == 0)
   9879 		return 0;
   9880 
   9881 	/* Disable D0 LPLU. */
   9882 	wm_lplu_d0_disable(sc);
   9883 
   9884 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9885 	sc->sc_ctrl |= CTRL_SLU;
   9886 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9887 	    || (sc->sc_type > WM_T_82543)) {
   9888 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9889 	} else {
   9890 		sc->sc_ctrl &= ~CTRL_ASDE;
   9891 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9892 		if (ife->ifm_media & IFM_FDX)
   9893 			sc->sc_ctrl |= CTRL_FD;
   9894 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9895 		case IFM_10_T:
   9896 			sc->sc_ctrl |= CTRL_SPEED_10;
   9897 			break;
   9898 		case IFM_100_TX:
   9899 			sc->sc_ctrl |= CTRL_SPEED_100;
   9900 			break;
   9901 		case IFM_1000_T:
   9902 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9903 			break;
   9904 		default:
   9905 			panic("wm_gmii_mediachange: bad media 0x%x",
   9906 			    ife->ifm_media);
   9907 		}
   9908 	}
   9909 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9910 	CSR_WRITE_FLUSH(sc);
   9911 	if (sc->sc_type <= WM_T_82543)
   9912 		wm_gmii_reset(sc);
   9913 
   9914 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9915 		return 0;
   9916 	return rc;
   9917 }
   9918 
   9919 /*
   9920  * wm_gmii_mediastatus:	[ifmedia interface function]
   9921  *
   9922  *	Get the current interface media status on a 1000BASE-T device.
   9923  */
   9924 static void
   9925 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9926 {
   9927 	struct wm_softc *sc = ifp->if_softc;
   9928 
   9929 	ether_mediastatus(ifp, ifmr);
   9930 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9931 	    | sc->sc_flowflags;
   9932 }
   9933 
   9934 #define	MDI_IO		CTRL_SWDPIN(2)
   9935 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9936 #define	MDI_CLK		CTRL_SWDPIN(3)
   9937 
   9938 static void
   9939 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9940 {
   9941 	uint32_t i, v;
   9942 
   9943 	v = CSR_READ(sc, WMREG_CTRL);
   9944 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9945 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9946 
   9947 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9948 		if (data & i)
   9949 			v |= MDI_IO;
   9950 		else
   9951 			v &= ~MDI_IO;
   9952 		CSR_WRITE(sc, WMREG_CTRL, v);
   9953 		CSR_WRITE_FLUSH(sc);
   9954 		delay(10);
   9955 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9956 		CSR_WRITE_FLUSH(sc);
   9957 		delay(10);
   9958 		CSR_WRITE(sc, WMREG_CTRL, v);
   9959 		CSR_WRITE_FLUSH(sc);
   9960 		delay(10);
   9961 	}
   9962 }
   9963 
   9964 static uint32_t
   9965 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9966 {
   9967 	uint32_t v, i, data = 0;
   9968 
   9969 	v = CSR_READ(sc, WMREG_CTRL);
   9970 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9971 	v |= CTRL_SWDPIO(3);
   9972 
   9973 	CSR_WRITE(sc, WMREG_CTRL, v);
   9974 	CSR_WRITE_FLUSH(sc);
   9975 	delay(10);
   9976 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9977 	CSR_WRITE_FLUSH(sc);
   9978 	delay(10);
   9979 	CSR_WRITE(sc, WMREG_CTRL, v);
   9980 	CSR_WRITE_FLUSH(sc);
   9981 	delay(10);
   9982 
   9983 	for (i = 0; i < 16; i++) {
   9984 		data <<= 1;
   9985 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9986 		CSR_WRITE_FLUSH(sc);
   9987 		delay(10);
   9988 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9989 			data |= 1;
   9990 		CSR_WRITE(sc, WMREG_CTRL, v);
   9991 		CSR_WRITE_FLUSH(sc);
   9992 		delay(10);
   9993 	}
   9994 
   9995 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9996 	CSR_WRITE_FLUSH(sc);
   9997 	delay(10);
   9998 	CSR_WRITE(sc, WMREG_CTRL, v);
   9999 	CSR_WRITE_FLUSH(sc);
   10000 	delay(10);
   10001 
   10002 	return data;
   10003 }
   10004 
   10005 #undef MDI_IO
   10006 #undef MDI_DIR
   10007 #undef MDI_CLK
   10008 
   10009 /*
   10010  * wm_gmii_i82543_readreg:	[mii interface function]
   10011  *
   10012  *	Read a PHY register on the GMII (i82543 version).
   10013  */
   10014 static int
   10015 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10016 {
   10017 	struct wm_softc *sc = device_private(dev);
   10018 	int rv;
   10019 
   10020 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10021 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10022 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10023 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10024 
   10025 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10026 		device_xname(dev), phy, reg, rv));
   10027 
   10028 	return rv;
   10029 }
   10030 
   10031 /*
   10032  * wm_gmii_i82543_writereg:	[mii interface function]
   10033  *
   10034  *	Write a PHY register on the GMII (i82543 version).
   10035  */
   10036 static void
   10037 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10038 {
   10039 	struct wm_softc *sc = device_private(dev);
   10040 
   10041 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10042 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10043 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10044 	    (MII_COMMAND_START << 30), 32);
   10045 }
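
/*
 * The accessors above bit-bang an IEEE 802.3 clause 22 management
 * frame over the software-definable pins: 32 preamble bits, then the
 * start/opcode/PHY-address/register-address fields, with the 16 data
 * bits clocked back in by wm_i82543_mii_recvbits() on a read.  A
 * sketch of the read-frame encoding (compiled out; constants from
 * dev/mii/mii.h):
 */
#if 0
	int phy = 1, reg = MII_BMSR;
	uint32_t cmd = reg | (phy << 5) | (MII_COMMAND_READ << 10) |
	    (MII_COMMAND_START << 12);		/* 0x1821 for phy 1, reg 1 */
	uint16_t data;

	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);	/* preamble */
	wm_i82543_mii_sendbits(sc, cmd, 14);
	data = wm_i82543_mii_recvbits(sc) & 0xffff;
#endif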
   10046 
   10047 /*
   10048  * wm_gmii_mdic_readreg:	[mii interface function]
   10049  *
   10050  *	Read a PHY register on the GMII.
   10051  */
   10052 static int
   10053 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10054 {
   10055 	struct wm_softc *sc = device_private(dev);
   10056 	uint32_t mdic = 0;
   10057 	int i, rv;
   10058 
   10059 	if (reg > MII_ADDRMASK) {
   10060 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10061 		    __func__, sc->sc_phytype, reg);
   10062 		reg &= MII_ADDRMASK;
   10063 	}
   10064 
   10065 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10066 	    MDIC_REGADD(reg));
   10067 
   10068 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10069 		mdic = CSR_READ(sc, WMREG_MDIC);
   10070 		if (mdic & MDIC_READY)
   10071 			break;
   10072 		delay(50);
   10073 	}
   10074 
   10075 	if ((mdic & MDIC_READY) == 0) {
   10076 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10077 		    device_xname(dev), phy, reg);
   10078 		rv = 0;
   10079 	} else if (mdic & MDIC_E) {
   10080 #if 0 /* This is normal if no PHY is present. */
   10081 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10082 		    device_xname(dev), phy, reg);
   10083 #endif
   10084 		rv = 0;
   10085 	} else {
   10086 		rv = MDIC_DATA(mdic);
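		/* An all-ones read usually means no PHY responded. */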
   10087 		if (rv == 0xffff)
   10088 			rv = 0;
   10089 	}
   10090 
   10091 	return rv;
   10092 }
   10093 
   10094 /*
   10095  * wm_gmii_mdic_writereg:	[mii interface function]
   10096  *
   10097  *	Write a PHY register on the GMII.
   10098  */
   10099 static void
   10100 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10101 {
   10102 	struct wm_softc *sc = device_private(dev);
   10103 	uint32_t mdic = 0;
   10104 	int i;
   10105 
   10106 	if (reg > MII_ADDRMASK) {
   10107 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10108 		    __func__, sc->sc_phytype, reg);
   10109 		reg &= MII_ADDRMASK;
   10110 	}
   10111 
   10112 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10113 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10114 
   10115 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10116 		mdic = CSR_READ(sc, WMREG_MDIC);
   10117 		if (mdic & MDIC_READY)
   10118 			break;
   10119 		delay(50);
   10120 	}
   10121 
   10122 	if ((mdic & MDIC_READY) == 0)
   10123 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10124 		    device_xname(dev), phy, reg);
   10125 	else if (mdic & MDIC_E)
   10126 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10127 		    device_xname(dev), phy, reg);
   10128 }
   10129 
   10130 /*
   10131  * wm_gmii_i82544_readreg:	[mii interface function]
   10132  *
   10133  *	Read a PHY register on the GMII.
   10134  */
   10135 static int
   10136 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10137 {
   10138 	struct wm_softc *sc = device_private(dev);
   10139 	int rv;
   10140 
   10141 	if (sc->phy.acquire(sc)) {
   10142 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10143 		return 0;
   10144 	}
   10145 
   10146 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10147 		switch (sc->sc_phytype) {
   10148 		case WMPHY_IGP:
   10149 		case WMPHY_IGP_2:
   10150 		case WMPHY_IGP_3:
   10151 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10152 			    reg);
   10153 			break;
   10154 		default:
   10155 #ifdef WM_DEBUG
   10156 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10157 			    __func__, sc->sc_phytype, reg);
   10158 #endif
   10159 			break;
   10160 		}
   10161 	}
   10162 
   10163 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10164 	sc->phy.release(sc);
   10165 
   10166 	return rv;
   10167 }
   10168 
   10169 /*
   10170  * wm_gmii_i82544_writereg:	[mii interface function]
   10171  *
   10172  *	Write a PHY register on the GMII.
   10173  */
   10174 static void
   10175 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10176 {
   10177 	struct wm_softc *sc = device_private(dev);
   10178 
   10179 	if (sc->phy.acquire(sc)) {
   10180 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10181 		return;
   10182 	}
   10183 
   10184 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10185 		switch (sc->sc_phytype) {
   10186 		case WMPHY_IGP:
   10187 		case WMPHY_IGP_2:
   10188 		case WMPHY_IGP_3:
   10189 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10190 			    reg);
   10191 			break;
   10192 		default:
   10193 #ifdef WM_DEBUG
   10194 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10195 			    __func__, sc->sc_phytype, reg);
   10196 #endif
   10197 			break;
   10198 		}
   10199 	}
   10200 
   10201 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10202 	sc->phy.release(sc);
   10203 }
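
/*
 * IGP page-select sketch (compiled out): for IGP PHYs the register
 * argument carries both page and offset; the whole value is written to
 * MII_IGPHY_PAGE_SELECT and the low five bits (MII_ADDRMASK) pick the
 * register within the page.  The page/register numbers below are
 * illustrative only.
 */
#if 0
	int reg = (2 << 5) | 25;	/* register 25 on page 2 = 0x59 */
	int val = wm_gmii_i82544_readreg(dev, 1, reg);
#endif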
   10204 
   10205 /*
   10206  * wm_gmii_i80003_readreg:	[mii interface function]
   10207  *
    10208  *	Read a PHY register on the Kumeran interface (80003).
    10209  * This could be handled by the PHY layer if we didn't have to lock the
    10210  * resource ...
   10211  */
   10212 static int
   10213 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10214 {
   10215 	struct wm_softc *sc = device_private(dev);
   10216 	int page_select, temp;
   10217 	int rv;
   10218 
   10219 	if (phy != 1) /* only one PHY on kumeran bus */
   10220 		return 0;
   10221 
   10222 	if (sc->phy.acquire(sc)) {
   10223 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10224 		return 0;
   10225 	}
   10226 
   10227 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10228 		page_select = GG82563_PHY_PAGE_SELECT;
   10229 	else {
   10230 		/*
   10231 		 * Use Alternative Page Select register to access registers
   10232 		 * 30 and 31.
   10233 		 */
   10234 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10235 	}
   10236 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10237 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10238 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10239 		/*
    10240 		 * Wait another 200us to work around a bug with the ready
    10241 		 * bit in the MDIC register.
   10242 		 */
   10243 		delay(200);
   10244 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10245 			device_printf(dev, "%s failed\n", __func__);
   10246 			rv = 0; /* XXX */
   10247 			goto out;
   10248 		}
   10249 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10250 		delay(200);
   10251 	} else
   10252 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10253 
   10254 out:
   10255 	sc->phy.release(sc);
   10256 	return rv;
   10257 }
   10258 
   10259 /*
   10260  * wm_gmii_i80003_writereg:	[mii interface function]
   10261  *
    10262  *	Write a PHY register on the Kumeran interface (80003).
    10263  * This could be handled by the PHY layer if we didn't have to lock the
    10264  * resource ...
   10265  */
   10266 static void
   10267 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10268 {
   10269 	struct wm_softc *sc = device_private(dev);
   10270 	int page_select, temp;
   10271 
   10272 	if (phy != 1) /* only one PHY on kumeran bus */
   10273 		return;
   10274 
   10275 	if (sc->phy.acquire(sc)) {
   10276 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10277 		return;
   10278 	}
   10279 
   10280 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10281 		page_select = GG82563_PHY_PAGE_SELECT;
   10282 	else {
   10283 		/*
   10284 		 * Use Alternative Page Select register to access registers
   10285 		 * 30 and 31.
   10286 		 */
   10287 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10288 	}
   10289 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10290 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10291 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10292 		/*
    10293 		 * Wait another 200us to work around a bug with the ready
    10294 		 * bit in the MDIC register.
   10295 		 */
   10296 		delay(200);
   10297 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10298 			device_printf(dev, "%s failed\n", __func__);
   10299 			goto out;
   10300 		}
   10301 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10302 		delay(200);
   10303 	} else
   10304 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10305 
   10306 out:
   10307 	sc->phy.release(sc);
   10308 }
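
/*
 * GG82563 page-select sketch (compiled out): the page is carried in
 * the upper bits of the register argument (GG82563_PAGE_SHIFT), and
 * registers 30/31 within a page must go through the alternative
 * page-select register, which is what the GG82563_MIN_ALT_REG test
 * above implements.  Page and register numbers are illustrative only.
 */
#if 0
	int reg = (5 << GG82563_PAGE_SHIFT) | 16;
	int val = wm_gmii_i80003_readreg(dev, 1, reg);
#endif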
   10309 
   10310 /*
   10311  * wm_gmii_bm_readreg:	[mii interface function]
   10312  *
    10313  *	Read a PHY register on the BM PHY (82567 and 82574/82583).
    10314  * This could be handled by the PHY layer if we didn't have to lock the
    10315  * resource ...
   10316  */
   10317 static int
   10318 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10319 {
   10320 	struct wm_softc *sc = device_private(dev);
   10321 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10322 	uint16_t val;
   10323 	int rv;
   10324 
   10325 	if (sc->phy.acquire(sc)) {
   10326 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10327 		return 0;
   10328 	}
   10329 
   10330 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10331 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10332 		    || (reg == 31)) ? 1 : phy;
   10333 	/* Page 800 works differently than the rest so it has its own func */
   10334 	if (page == BM_WUC_PAGE) {
   10335 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10336 		rv = val;
   10337 		goto release;
   10338 	}
   10339 
   10340 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10341 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10342 		    && (sc->sc_type != WM_T_82583))
   10343 			wm_gmii_mdic_writereg(dev, phy,
   10344 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10345 		else
   10346 			wm_gmii_mdic_writereg(dev, phy,
   10347 			    BME1000_PHY_PAGE_SELECT, page);
   10348 	}
   10349 
   10350 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10351 
   10352 release:
   10353 	sc->phy.release(sc);
   10354 	return rv;
   10355 }
   10356 
   10357 /*
   10358  * wm_gmii_bm_writereg:	[mii interface function]
   10359  *
    10360  *	Write a PHY register on the BM PHY (82567 and 82574/82583).
    10361  * This could be handled by the PHY layer if we didn't have to lock the
    10362  * resource ...
   10363  */
   10364 static void
   10365 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10366 {
   10367 	struct wm_softc *sc = device_private(dev);
   10368 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10369 
   10370 	if (sc->phy.acquire(sc)) {
   10371 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10372 		return;
   10373 	}
   10374 
   10375 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10376 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10377 		    || (reg == 31)) ? 1 : phy;
   10378 	/* Page 800 works differently than the rest so it has its own func */
   10379 	if (page == BM_WUC_PAGE) {
   10380 		uint16_t tmp;
   10381 
   10382 		tmp = val;
   10383 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10384 		goto release;
   10385 	}
   10386 
   10387 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10388 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10389 		    && (sc->sc_type != WM_T_82583))
   10390 			wm_gmii_mdic_writereg(dev, phy,
   10391 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10392 		else
   10393 			wm_gmii_mdic_writereg(dev, phy,
   10394 			    BME1000_PHY_PAGE_SELECT, page);
   10395 	}
   10396 
   10397 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10398 
   10399 release:
   10400 	sc->phy.release(sc);
   10401 }
   10402 
   10403 static void
   10404 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10405 {
   10406 	struct wm_softc *sc = device_private(dev);
   10407 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10408 	uint16_t wuce, reg;
   10409 
   10410 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10411 		device_xname(dev), __func__));
   10412 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10413 	if (sc->sc_type == WM_T_PCH) {
    10414 		/* XXX The e1000 driver does nothing here... why? */
   10415 	}
   10416 
   10417 	/*
   10418 	 * 1) Enable PHY wakeup register first.
   10419 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10420 	 */
   10421 
   10422 	/* Set page 769 */
   10423 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10424 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10425 
   10426 	/* Read WUCE and save it */
   10427 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10428 
   10429 	reg = wuce | BM_WUC_ENABLE_BIT;
   10430 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10431 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10432 
   10433 	/* Select page 800 */
   10434 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10435 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10436 
   10437 	/*
   10438 	 * 2) Access PHY wakeup register.
   10439 	 * See e1000_access_phy_wakeup_reg_bm.
   10440 	 */
   10441 
   10442 	/* Write page 800 */
   10443 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10444 
   10445 	if (rd)
   10446 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10447 	else
   10448 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10449 
   10450 	/*
   10451 	 * 3) Disable PHY wakeup register.
   10452 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10453 	 */
   10454 	/* Set page 769 */
   10455 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10456 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10457 
   10458 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10459 }
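
/*
 * Usage sketch (compiled out): this function is normally reached from
 * the BM/HV accessors when the register argument decodes to page 800
 * (BM_WUC_PAGE), as in wm_gmii_bm_readreg() above.  The register
 * number below is illustrative only.
 */
#if 0
	uint16_t val;

	wm_access_phy_wakeup_reg_bm(dev,
	    (BM_WUC_PAGE << BME1000_PAGE_SHIFT) | 1, &val, 1);
#endif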
   10460 
   10461 /*
   10462  * wm_gmii_hv_readreg:	[mii interface function]
   10463  *
    10464  *	Read a PHY register on the HV PHY (PCH and newer).
    10465  * This could be handled by the PHY layer if we didn't have to lock the
    10466  * resource ...
   10467  */
   10468 static int
   10469 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10470 {
   10471 	struct wm_softc *sc = device_private(dev);
   10472 	int rv;
   10473 
   10474 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10475 		device_xname(dev), __func__));
   10476 	if (sc->phy.acquire(sc)) {
   10477 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10478 		return 0;
   10479 	}
   10480 
   10481 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10482 	sc->phy.release(sc);
   10483 	return rv;
   10484 }
   10485 
   10486 static int
   10487 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10488 {
   10489 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10490 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10491 	uint16_t val;
   10492 	int rv;
   10493 
   10494 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10495 
   10496 	/* Page 800 works differently than the rest so it has its own func */
   10497 	if (page == BM_WUC_PAGE) {
   10498 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10499 		return val;
   10500 	}
   10501 
   10502 	/*
    10503 	 * Pages below 768 work differently than the rest and would need
    10504 	 * their own function, which is not implemented here.
   10505 	 */
   10506 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    10507 		device_printf(dev, "%s: unhandled page %d\n", __func__, page);
   10508 		return 0;
   10509 	}
   10510 
   10511 	/*
   10512 	 * XXX I21[789] documents say that the SMBus Address register is at
   10513 	 * PHY address 01, Page 0 (not 768), Register 26.
   10514 	 */
   10515 	if (page == HV_INTC_FC_PAGE_START)
   10516 		page = 0;
   10517 
   10518 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10519 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10520 		    page << BME1000_PAGE_SHIFT);
   10521 	}
   10522 
   10523 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10524 	return rv;
   10525 }
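
/*
 * Addressing note (compiled-out sketch): the register argument packs
 * the page (BM_PHY_REG_PAGE) and the register number (BM_PHY_REG_NUM);
 * pages at or above HV_INTC_FC_PAGE_START are always addressed at PHY
 * address 1, as done above.  Plain clause 22 register numbers decode
 * to page 0 and pass straight through:
 */
#if 0
	int bmcr = wm_gmii_hv_readreg(dev, 1, MII_BMCR);
#endif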
   10526 
   10527 /*
   10528  * wm_gmii_hv_writereg:	[mii interface function]
   10529  *
    10530  *	Write a PHY register on the HV PHY (PCH and newer).
    10531  * This could be handled by the PHY layer if we didn't have to lock the
    10532  * resource ...
   10533  */
   10534 static void
   10535 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10536 {
   10537 	struct wm_softc *sc = device_private(dev);
   10538 
   10539 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10540 		device_xname(dev), __func__));
   10541 
   10542 	if (sc->phy.acquire(sc)) {
   10543 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10544 		return;
   10545 	}
   10546 
   10547 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10548 	sc->phy.release(sc);
   10549 }
   10550 
   10551 static void
   10552 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10553 {
   10554 	struct wm_softc *sc = device_private(dev);
   10555 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10556 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10557 
   10558 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10559 
   10560 	/* Page 800 works differently than the rest so it has its own func */
   10561 	if (page == BM_WUC_PAGE) {
   10562 		uint16_t tmp;
   10563 
   10564 		tmp = val;
   10565 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10566 		return;
   10567 	}
   10568 
   10569 	/*
    10570 	 * Pages below 768 work differently than the rest and would need
    10571 	 * their own function, which is not implemented here.
   10572 	 */
   10573 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    10574 		device_printf(dev, "%s: unhandled page %d\n", __func__, page);
   10575 		return;
   10576 	}
   10577 
   10578 	{
   10579 		/*
   10580 		 * XXX I21[789] documents say that the SMBus Address register
   10581 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10582 		 */
   10583 		if (page == HV_INTC_FC_PAGE_START)
   10584 			page = 0;
   10585 
   10586 		/*
   10587 		 * XXX Workaround MDIO accesses being disabled after entering
   10588 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10589 		 * register is set)
   10590 		 */
   10591 		if (sc->sc_phytype == WMPHY_82578) {
   10592 			struct mii_softc *child;
   10593 
   10594 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10595 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10596 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10597 			    && ((val & (1 << 11)) != 0)) {
    10598 				device_printf(dev, "XXX need workaround\n");
   10599 			}
   10600 		}
   10601 
   10602 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10603 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10604 			    page << BME1000_PAGE_SHIFT);
   10605 		}
   10606 	}
   10607 
   10608 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10609 }
   10610 
   10611 /*
   10612  * wm_gmii_82580_readreg:	[mii interface function]
   10613  *
   10614  *	Read a PHY register on the 82580 and I350.
   10615  * This could be handled by the PHY layer if we didn't have to lock the
    10616  * resource ...
   10617  */
   10618 static int
   10619 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10620 {
   10621 	struct wm_softc *sc = device_private(dev);
   10622 	int rv;
   10623 
   10624 	if (sc->phy.acquire(sc) != 0) {
   10625 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10626 		return 0;
   10627 	}
   10628 
   10629 #ifdef DIAGNOSTIC
   10630 	if (reg > MII_ADDRMASK) {
   10631 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10632 		    __func__, sc->sc_phytype, reg);
   10633 		reg &= MII_ADDRMASK;
   10634 	}
   10635 #endif
   10636 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10637 
   10638 	sc->phy.release(sc);
   10639 	return rv;
   10640 }
   10641 
   10642 /*
   10643  * wm_gmii_82580_writereg:	[mii interface function]
   10644  *
   10645  *	Write a PHY register on the 82580 and I350.
   10646  * This could be handled by the PHY layer if we didn't have to lock the
    10647  * resource ...
   10648  */
   10649 static void
   10650 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10651 {
   10652 	struct wm_softc *sc = device_private(dev);
   10653 
   10654 	if (sc->phy.acquire(sc) != 0) {
   10655 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10656 		return;
   10657 	}
   10658 
   10659 #ifdef DIAGNOSTIC
   10660 	if (reg > MII_ADDRMASK) {
   10661 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10662 		    __func__, sc->sc_phytype, reg);
   10663 		reg &= MII_ADDRMASK;
   10664 	}
   10665 #endif
   10666 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10667 
   10668 	sc->phy.release(sc);
   10669 }
   10670 
   10671 /*
   10672  * wm_gmii_gs40g_readreg:	[mii interface function]
   10673  *
    10674  *	Read a PHY register on the I210 and I211.
    10675  * This could be handled by the PHY layer if we didn't have to lock the
    10676  * resource ...
   10677  */
   10678 static int
   10679 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10680 {
   10681 	struct wm_softc *sc = device_private(dev);
   10682 	int page, offset;
   10683 	int rv;
   10684 
   10685 	/* Acquire semaphore */
   10686 	if (sc->phy.acquire(sc)) {
   10687 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10688 		return 0;
   10689 	}
   10690 
   10691 	/* Page select */
   10692 	page = reg >> GS40G_PAGE_SHIFT;
   10693 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10694 
   10695 	/* Read reg */
   10696 	offset = reg & GS40G_OFFSET_MASK;
   10697 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10698 
   10699 	sc->phy.release(sc);
   10700 	return rv;
   10701 }
   10702 
   10703 /*
   10704  * wm_gmii_gs40g_writereg:	[mii interface function]
   10705  *
   10706  *	Write a PHY register on the I210 and I211.
   10707  * This could be handled by the PHY layer if we didn't have to lock the
    10708  * resource ...
   10709  */
   10710 static void
   10711 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10712 {
   10713 	struct wm_softc *sc = device_private(dev);
   10714 	int page, offset;
   10715 
   10716 	/* Acquire semaphore */
   10717 	if (sc->phy.acquire(sc)) {
   10718 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10719 		return;
   10720 	}
   10721 
   10722 	/* Page select */
   10723 	page = reg >> GS40G_PAGE_SHIFT;
   10724 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10725 
   10726 	/* Write reg */
   10727 	offset = reg & GS40G_OFFSET_MASK;
   10728 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10729 
   10730 	/* Release semaphore */
   10731 	sc->phy.release(sc);
   10732 }
   10733 
   10734 /*
   10735  * wm_gmii_statchg:	[mii interface function]
   10736  *
   10737  *	Callback from MII layer when media changes.
   10738  */
   10739 static void
   10740 wm_gmii_statchg(struct ifnet *ifp)
   10741 {
   10742 	struct wm_softc *sc = ifp->if_softc;
   10743 	struct mii_data *mii = &sc->sc_mii;
   10744 
   10745 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10746 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10747 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10748 
   10749 	/*
   10750 	 * Get flow control negotiation result.
   10751 	 */
   10752 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10753 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10754 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10755 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10756 	}
   10757 
   10758 	if (sc->sc_flowflags & IFM_FLOW) {
   10759 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10760 			sc->sc_ctrl |= CTRL_TFCE;
   10761 			sc->sc_fcrtl |= FCRTL_XONE;
   10762 		}
   10763 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10764 			sc->sc_ctrl |= CTRL_RFCE;
   10765 	}
   10766 
   10767 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10768 		DPRINTF(WM_DEBUG_LINK,
   10769 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10770 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10771 	} else {
   10772 		DPRINTF(WM_DEBUG_LINK,
   10773 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10774 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10775 	}
   10776 
   10777 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10778 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10779 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10780 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10781 	if (sc->sc_type == WM_T_80003) {
   10782 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10783 		case IFM_1000_T:
   10784 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10785 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    10786 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10787 			break;
   10788 		default:
   10789 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10790 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    10791 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10792 			break;
   10793 		}
   10794 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10795 	}
   10796 }
   10797 
    10798 /* Kumeran related (80003, ICH* and PCH*) */
   10799 
   10800 /*
   10801  * wm_kmrn_readreg:
   10802  *
    10803  *	Read a Kumeran register
   10804  */
   10805 static int
   10806 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10807 {
   10808 	int rv;
   10809 
   10810 	if (sc->sc_type == WM_T_80003)
   10811 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10812 	else
   10813 		rv = sc->phy.acquire(sc);
   10814 	if (rv != 0) {
   10815 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10816 		    __func__);
   10817 		return rv;
   10818 	}
   10819 
   10820 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10821 
   10822 	if (sc->sc_type == WM_T_80003)
   10823 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10824 	else
   10825 		sc->phy.release(sc);
   10826 
   10827 	return rv;
   10828 }
   10829 
   10830 static int
   10831 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10832 {
   10833 
   10834 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10835 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10836 	    KUMCTRLSTA_REN);
   10837 	CSR_WRITE_FLUSH(sc);
   10838 	delay(2);
   10839 
   10840 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10841 
   10842 	return 0;
   10843 }
   10844 
   10845 /*
   10846  * wm_kmrn_writereg:
   10847  *
    10848  *	Write a Kumeran register
   10849  */
   10850 static int
   10851 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10852 {
   10853 	int rv;
   10854 
   10855 	if (sc->sc_type == WM_T_80003)
   10856 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10857 	else
   10858 		rv = sc->phy.acquire(sc);
   10859 	if (rv != 0) {
   10860 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10861 		    __func__);
   10862 		return rv;
   10863 	}
   10864 
   10865 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10866 
   10867 	if (sc->sc_type == WM_T_80003)
   10868 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10869 	else
   10870 		sc->phy.release(sc);
   10871 
   10872 	return rv;
   10873 }
   10874 
   10875 static int
   10876 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10877 {
   10878 
   10879 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10880 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10881 
   10882 	return 0;
   10883 }
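
/*
 * Kumeran usage sketch (compiled out): the offset is shifted into
 * KUMCTRLSTA_OFFSET and KUMCTRLSTA_REN requests a read; the data comes
 * back in the low 16 bits shortly afterwards.  Typical caller pattern
 * (cf. wm_gmii_statchg() above):
 */
#if 0
	uint16_t hd_ctrl;

	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &hd_ctrl) == 0)
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
		    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif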
   10884 
   10885 /* SGMII related */
   10886 
   10887 /*
   10888  * wm_sgmii_uses_mdio
   10889  *
   10890  * Check whether the transaction is to the internal PHY or the external
   10891  * MDIO interface. Return true if it's MDIO.
   10892  */
   10893 static bool
   10894 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10895 {
   10896 	uint32_t reg;
   10897 	bool ismdio = false;
   10898 
   10899 	switch (sc->sc_type) {
   10900 	case WM_T_82575:
   10901 	case WM_T_82576:
   10902 		reg = CSR_READ(sc, WMREG_MDIC);
   10903 		ismdio = ((reg & MDIC_DEST) != 0);
   10904 		break;
   10905 	case WM_T_82580:
   10906 	case WM_T_I350:
   10907 	case WM_T_I354:
   10908 	case WM_T_I210:
   10909 	case WM_T_I211:
   10910 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10911 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10912 		break;
   10913 	default:
   10914 		break;
   10915 	}
   10916 
   10917 	return ismdio;
   10918 }
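
/*
 * Usage sketch (compiled out): this predicate decides which accessors
 * wm_gmii_setup_phytype() installs, mirroring the test there.
 */
#if 0
	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
		/* External PHY reached over I2C: use the SGMII accessors. */
		mii->mii_readreg = wm_sgmii_readreg;
		mii->mii_writereg = wm_sgmii_writereg;
	}
#endif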
   10919 
   10920 /*
   10921  * wm_sgmii_readreg:	[mii interface function]
   10922  *
   10923  *	Read a PHY register on the SGMII
   10924  * This could be handled by the PHY layer if we didn't have to lock the
    10925  * resource ...
   10926  */
   10927 static int
   10928 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10929 {
   10930 	struct wm_softc *sc = device_private(dev);
   10931 	uint32_t i2ccmd;
   10932 	int i, rv;
   10933 
   10934 	if (sc->phy.acquire(sc)) {
   10935 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10936 		return 0;
   10937 	}
   10938 
   10939 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10940 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10941 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10942 
   10943 	/* Poll the ready bit */
   10944 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10945 		delay(50);
   10946 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10947 		if (i2ccmd & I2CCMD_READY)
   10948 			break;
   10949 	}
   10950 	if ((i2ccmd & I2CCMD_READY) == 0)
   10951 		device_printf(dev, "I2CCMD Read did not complete\n");
   10952 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10953 		device_printf(dev, "I2CCMD Error bit set\n");
   10954 
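	/* Swap the data bytes back from I2C order (cf. the write path below) */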
   10955 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10956 
   10957 	sc->phy.release(sc);
   10958 	return rv;
   10959 }
   10960 
   10961 /*
   10962  * wm_sgmii_writereg:	[mii interface function]
   10963  *
   10964  *	Write a PHY register on the SGMII.
   10965  * This could be handled by the PHY layer if we didn't have to lock the
    10966  * resource ...
   10967  */
   10968 static void
   10969 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10970 {
   10971 	struct wm_softc *sc = device_private(dev);
   10972 	uint32_t i2ccmd;
   10973 	int i;
   10974 	int swapdata;
   10975 
   10976 	if (sc->phy.acquire(sc) != 0) {
   10977 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10978 		return;
   10979 	}
   10980 	/* Swap the data bytes for the I2C interface */
   10981 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10982 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10983 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   10984 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10985 
   10986 	/* Poll the ready bit */
   10987 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10988 		delay(50);
   10989 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10990 		if (i2ccmd & I2CCMD_READY)
   10991 			break;
   10992 	}
   10993 	if ((i2ccmd & I2CCMD_READY) == 0)
   10994 		device_printf(dev, "I2CCMD Write did not complete\n");
   10995 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10996 		device_printf(dev, "I2CCMD Error bit set\n");
   10997 
   10998 	sc->phy.release(sc);
   10999 }
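
/*
 * I2CCMD sketch (compiled out): SGMII PHY registers travel over I2C,
 * which is why both paths above swap the two data bytes.  Composing a
 * read looks like this (register number illustrative):
 */
#if 0
	uint32_t i2ccmd = (MII_BMSR << I2CCMD_REG_ADDR_SHIFT)
	    | (1 << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;

	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
	/* ... poll I2CCMD_READY, check I2CCMD_ERROR, then byte-swap ... */
#endif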
   11000 
   11001 /* TBI related */
   11002 
   11003 static bool
   11004 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11005 {
   11006 	bool sig;
   11007 
   11008 	sig = ctrl & CTRL_SWDPIN(1);
   11009 
   11010 	/*
   11011 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11012 	 * detect a signal, 1 if they don't.
   11013 	 */
   11014 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11015 		sig = !sig;
   11016 
   11017 	return sig;
   11018 }
   11019 
   11020 /*
   11021  * wm_tbi_mediainit:
   11022  *
   11023  *	Initialize media for use on 1000BASE-X devices.
   11024  */
   11025 static void
   11026 wm_tbi_mediainit(struct wm_softc *sc)
   11027 {
   11028 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11029 	const char *sep = "";
   11030 
   11031 	if (sc->sc_type < WM_T_82543)
   11032 		sc->sc_tipg = TIPG_WM_DFLT;
   11033 	else
   11034 		sc->sc_tipg = TIPG_LG_DFLT;
   11035 
   11036 	sc->sc_tbi_serdes_anegticks = 5;
   11037 
   11038 	/* Initialize our media structures */
   11039 	sc->sc_mii.mii_ifp = ifp;
   11040 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11041 
   11042 	if ((sc->sc_type >= WM_T_82575)
   11043 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11044 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11045 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11046 	else
   11047 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11048 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11049 
   11050 	/*
   11051 	 * SWD Pins:
   11052 	 *
   11053 	 *	0 = Link LED (output)
   11054 	 *	1 = Loss Of Signal (input)
   11055 	 */
   11056 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11057 
   11058 	/* XXX Perhaps this is only for TBI */
   11059 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11060 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11061 
   11062 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11063 		sc->sc_ctrl &= ~CTRL_LRST;
   11064 
   11065 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11066 
   11067 #define	ADD(ss, mm, dd)							\
   11068 do {									\
   11069 	aprint_normal("%s%s", sep, ss);					\
   11070 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11071 	sep = ", ";							\
   11072 } while (/*CONSTCOND*/0)
   11073 
   11074 	aprint_normal_dev(sc->sc_dev, "");
   11075 
   11076 	if (sc->sc_type == WM_T_I354) {
   11077 		uint32_t status;
   11078 
   11079 		status = CSR_READ(sc, WMREG_STATUS);
   11080 		if (((status & STATUS_2P5_SKU) != 0)
   11081 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    11082 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    11083 		} else
    11084 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   11085 	} else if (sc->sc_type == WM_T_82545) {
   11086 		/* Only 82545 is LX (XXX except SFP) */
   11087 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11088 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11089 	} else {
   11090 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11091 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11092 	}
   11093 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11094 	aprint_normal("\n");
   11095 
   11096 #undef ADD
   11097 
   11098 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11099 }
   11100 
   11101 /*
   11102  * wm_tbi_mediachange:	[ifmedia interface function]
   11103  *
   11104  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11105  */
   11106 static int
   11107 wm_tbi_mediachange(struct ifnet *ifp)
   11108 {
   11109 	struct wm_softc *sc = ifp->if_softc;
   11110 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11111 	uint32_t status, ctrl;
   11112 	bool signal;
   11113 	int i;
   11114 
   11115 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11116 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11117 		/* XXX need some work for >= 82571 and < 82575 */
   11118 		if (sc->sc_type < WM_T_82575)
   11119 			return 0;
   11120 	}
   11121 
   11122 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11123 	    || (sc->sc_type >= WM_T_82575))
   11124 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11125 
   11126 	sc->sc_ctrl &= ~CTRL_LRST;
   11127 	sc->sc_txcw = TXCW_ANE;
   11128 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11129 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11130 	else if (ife->ifm_media & IFM_FDX)
   11131 		sc->sc_txcw |= TXCW_FD;
   11132 	else
   11133 		sc->sc_txcw |= TXCW_HD;
   11134 
   11135 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11136 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11137 
    11138 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11139 		device_xname(sc->sc_dev), sc->sc_txcw));
   11140 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11141 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11142 	CSR_WRITE_FLUSH(sc);
   11143 	delay(1000);
   11144 
    11145 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11146 	signal = wm_tbi_havesignal(sc, ctrl);
   11147 
   11148 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11149 		signal));
   11150 
   11151 	if (signal) {
   11152 		/* Have signal; wait for the link to come up. */
   11153 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11154 			delay(10000);
   11155 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11156 				break;
   11157 		}
   11158 
    11159 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
    11160 			device_xname(sc->sc_dev), i));
   11161 
   11162 		status = CSR_READ(sc, WMREG_STATUS);
   11163 		DPRINTF(WM_DEBUG_LINK,
   11164 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
    11165 			device_xname(sc->sc_dev), status, STATUS_LU));
   11166 		if (status & STATUS_LU) {
   11167 			/* Link is up. */
   11168 			DPRINTF(WM_DEBUG_LINK,
   11169 			    ("%s: LINK: set media -> link up %s\n",
   11170 				device_xname(sc->sc_dev),
   11171 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11172 
   11173 			/*
    11174 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    11175 			 * automatically, so keep sc->sc_ctrl in sync.
   11176 			 */
   11177 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11178 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11179 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11180 			if (status & STATUS_FD)
   11181 				sc->sc_tctl |=
   11182 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11183 			else
   11184 				sc->sc_tctl |=
   11185 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11186 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11187 				sc->sc_fcrtl |= FCRTL_XONE;
   11188 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11189 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11190 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11191 			sc->sc_tbi_linkup = 1;
   11192 		} else {
   11193 			if (i == WM_LINKUP_TIMEOUT)
   11194 				wm_check_for_link(sc);
   11195 			/* Link is down. */
   11196 			DPRINTF(WM_DEBUG_LINK,
   11197 			    ("%s: LINK: set media -> link down\n",
   11198 				device_xname(sc->sc_dev)));
   11199 			sc->sc_tbi_linkup = 0;
   11200 		}
   11201 	} else {
   11202 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11203 			device_xname(sc->sc_dev)));
   11204 		sc->sc_tbi_linkup = 0;
   11205 	}
   11206 
   11207 	wm_tbi_serdes_set_linkled(sc);
   11208 
   11209 	return 0;
   11210 }
   11211 
   11212 /*
   11213  * wm_tbi_mediastatus:	[ifmedia interface function]
   11214  *
   11215  *	Get the current interface media status on a 1000BASE-X device.
   11216  */
   11217 static void
   11218 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11219 {
   11220 	struct wm_softc *sc = ifp->if_softc;
   11221 	uint32_t ctrl, status;
   11222 
   11223 	ifmr->ifm_status = IFM_AVALID;
   11224 	ifmr->ifm_active = IFM_ETHER;
   11225 
   11226 	status = CSR_READ(sc, WMREG_STATUS);
   11227 	if ((status & STATUS_LU) == 0) {
   11228 		ifmr->ifm_active |= IFM_NONE;
   11229 		return;
   11230 	}
   11231 
   11232 	ifmr->ifm_status |= IFM_ACTIVE;
   11233 	/* Only 82545 is LX */
   11234 	if (sc->sc_type == WM_T_82545)
   11235 		ifmr->ifm_active |= IFM_1000_LX;
   11236 	else
   11237 		ifmr->ifm_active |= IFM_1000_SX;
   11238 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11239 		ifmr->ifm_active |= IFM_FDX;
   11240 	else
   11241 		ifmr->ifm_active |= IFM_HDX;
   11242 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11243 	if (ctrl & CTRL_RFCE)
   11244 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11245 	if (ctrl & CTRL_TFCE)
   11246 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11247 }
   11248 
   11249 /* XXX TBI only */
   11250 static int
   11251 wm_check_for_link(struct wm_softc *sc)
   11252 {
   11253 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11254 	uint32_t rxcw;
   11255 	uint32_t ctrl;
   11256 	uint32_t status;
   11257 	bool signal;
   11258 
   11259 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11260 		device_xname(sc->sc_dev), __func__));
   11261 
   11262 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11263 		/* XXX need some work for >= 82571 */
   11264 		if (sc->sc_type >= WM_T_82571) {
   11265 			sc->sc_tbi_linkup = 1;
   11266 			return 0;
   11267 		}
   11268 	}
   11269 
   11270 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11271 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11272 	status = CSR_READ(sc, WMREG_STATUS);
   11273 	signal = wm_tbi_havesignal(sc, ctrl);
   11274 
   11275 	DPRINTF(WM_DEBUG_LINK,
   11276 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11277 		device_xname(sc->sc_dev), __func__, signal,
   11278 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11279 
   11280 	/*
   11281 	 * SWDPIN   LU RXCW
   11282 	 *	0    0	  0
   11283 	 *	0    0	  1	(should not happen)
   11284 	 *	0    1	  0	(should not happen)
   11285 	 *	0    1	  1	(should not happen)
   11286 	 *	1    0	  0	Disable autonego and force linkup
   11287 	 *	1    0	  1	got /C/ but not linkup yet
   11288 	 *	1    1	  0	(linkup)
   11289 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11290 	 *
   11291 	 */
   11292 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11293 		DPRINTF(WM_DEBUG_LINK,
   11294 		    ("%s: %s: force linkup and fullduplex\n",
   11295 			device_xname(sc->sc_dev), __func__));
   11296 		sc->sc_tbi_linkup = 0;
   11297 		/* Disable auto-negotiation in the TXCW register */
   11298 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11299 
   11300 		/*
   11301 		 * Force link-up and also force full-duplex.
   11302 		 *
    11303 		 * NOTE: the hardware may have updated TFCE and RFCE in CTRL
    11304 		 * automatically, so update sc->sc_ctrl from the value read above.
   11305 		 */
   11306 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11307 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11308 	} else if (((status & STATUS_LU) != 0)
   11309 	    && ((rxcw & RXCW_C) != 0)
   11310 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11311 		sc->sc_tbi_linkup = 1;
   11312 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11313 			device_xname(sc->sc_dev),
   11314 			__func__));
   11315 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11316 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11317 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    11318 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   11319 			device_xname(sc->sc_dev), __func__));
   11320 	} else {
   11321 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11322 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11323 			status));
   11324 	}
   11325 
   11326 	return 0;
   11327 }
   11328 
   11329 /*
   11330  * wm_tbi_tick:
   11331  *
   11332  *	Check the link on TBI devices.
   11333  *	This function acts as mii_tick().
   11334  */
   11335 static void
   11336 wm_tbi_tick(struct wm_softc *sc)
   11337 {
   11338 	struct mii_data *mii = &sc->sc_mii;
   11339 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11340 	uint32_t status;
   11341 
   11342 	KASSERT(WM_CORE_LOCKED(sc));
   11343 
   11344 	status = CSR_READ(sc, WMREG_STATUS);
   11345 
   11346 	/* XXX is this needed? */
   11347 	(void)CSR_READ(sc, WMREG_RXCW);
   11348 	(void)CSR_READ(sc, WMREG_CTRL);
   11349 
   11350 	/* set link status */
   11351 	if ((status & STATUS_LU) == 0) {
   11352 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11353 			device_xname(sc->sc_dev)));
   11354 		sc->sc_tbi_linkup = 0;
   11355 	} else if (sc->sc_tbi_linkup == 0) {
   11356 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11357 			device_xname(sc->sc_dev),
   11358 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11359 		sc->sc_tbi_linkup = 1;
   11360 		sc->sc_tbi_serdes_ticks = 0;
   11361 	}
   11362 
   11363 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11364 		goto setled;
   11365 
   11366 	if ((status & STATUS_LU) == 0) {
   11367 		sc->sc_tbi_linkup = 0;
   11368 		/* If the timer expired, retry autonegotiation */
   11369 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11370 		    && (++sc->sc_tbi_serdes_ticks
   11371 			>= sc->sc_tbi_serdes_anegticks)) {
   11372 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11373 			sc->sc_tbi_serdes_ticks = 0;
   11374 			/*
   11375 			 * Reset the link, and let autonegotiation do
   11376 			 * its thing
   11377 			 */
   11378 			sc->sc_ctrl |= CTRL_LRST;
   11379 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11380 			CSR_WRITE_FLUSH(sc);
   11381 			delay(1000);
   11382 			sc->sc_ctrl &= ~CTRL_LRST;
   11383 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11384 			CSR_WRITE_FLUSH(sc);
   11385 			delay(1000);
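          			/* Toggle ANE in TXCW to restart autonegotiation */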
   11386 			CSR_WRITE(sc, WMREG_TXCW,
   11387 			    sc->sc_txcw & ~TXCW_ANE);
   11388 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11389 		}
   11390 	}
   11391 
   11392 setled:
   11393 	wm_tbi_serdes_set_linkled(sc);
   11394 }
   11395 
   11396 /* SERDES related */
   11397 static void
   11398 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11399 {
   11400 	uint32_t reg;
   11401 
   11402 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11403 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11404 		return;
   11405 
   11406 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11407 	reg |= PCS_CFG_PCS_EN;
   11408 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11409 
   11410 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11411 	reg &= ~CTRL_EXT_SWDPIN(3);
   11412 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11413 	CSR_WRITE_FLUSH(sc);
   11414 }
   11415 
   11416 static int
   11417 wm_serdes_mediachange(struct ifnet *ifp)
   11418 {
   11419 	struct wm_softc *sc = ifp->if_softc;
   11420 	bool pcs_autoneg = true; /* XXX */
   11421 	uint32_t ctrl_ext, pcs_lctl, reg;
   11422 
   11423 	/* XXX Currently, this function is not called on 8257[12] */
   11424 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11425 	    || (sc->sc_type >= WM_T_82575))
   11426 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11427 
   11428 	wm_serdes_power_up_link_82575(sc);
   11429 
   11430 	sc->sc_ctrl |= CTRL_SLU;
   11431 
   11432 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11433 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11434 
   11435 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11436 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11437 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11438 	case CTRL_EXT_LINK_MODE_SGMII:
   11439 		pcs_autoneg = true;
   11440 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11441 		break;
   11442 	case CTRL_EXT_LINK_MODE_1000KX:
   11443 		pcs_autoneg = false;
   11444 		/* FALLTHROUGH */
   11445 	default:
   11446 		if ((sc->sc_type == WM_T_82575)
   11447 		    || (sc->sc_type == WM_T_82576)) {
   11448 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11449 				pcs_autoneg = false;
   11450 		}
   11451 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11452 		    | CTRL_FRCFDX;
   11453 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11454 	}
   11455 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11456 
   11457 	if (pcs_autoneg) {
   11458 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11459 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11460 
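          		/* Advertise symmetric and asymmetric PAUSE to the peer */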
   11461 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11462 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11463 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11464 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11465 	} else
   11466 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11467 
   11468 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11469 
   11470 
   11471 	return 0;
   11472 }
   11473 
   11474 static void
   11475 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11476 {
   11477 	struct wm_softc *sc = ifp->if_softc;
   11478 	struct mii_data *mii = &sc->sc_mii;
   11479 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11480 	uint32_t pcs_adv, pcs_lpab, reg;
   11481 
   11482 	ifmr->ifm_status = IFM_AVALID;
   11483 	ifmr->ifm_active = IFM_ETHER;
   11484 
   11485 	/* Check PCS */
   11486 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11487 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11488 		ifmr->ifm_active |= IFM_NONE;
   11489 		sc->sc_tbi_linkup = 0;
   11490 		goto setled;
   11491 	}
   11492 
   11493 	sc->sc_tbi_linkup = 1;
   11494 	ifmr->ifm_status |= IFM_ACTIVE;
   11495 	if (sc->sc_type == WM_T_I354) {
   11496 		uint32_t status;
   11497 
   11498 		status = CSR_READ(sc, WMREG_STATUS);
   11499 		if (((status & STATUS_2P5_SKU) != 0)
   11500 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11501 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11502 		} else
   11503 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11504 	} else {
   11505 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11506 		case PCS_LSTS_SPEED_10:
   11507 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11508 			break;
   11509 		case PCS_LSTS_SPEED_100:
   11510 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11511 			break;
   11512 		case PCS_LSTS_SPEED_1000:
   11513 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11514 			break;
   11515 		default:
   11516 			device_printf(sc->sc_dev, "Unknown speed\n");
   11517 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11518 			break;
   11519 		}
   11520 	}
   11521 	if ((reg & PCS_LSTS_FDX) != 0)
   11522 		ifmr->ifm_active |= IFM_FDX;
   11523 	else
   11524 		ifmr->ifm_active |= IFM_HDX;
   11525 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11526 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11527 		/* Check flow */
   11528 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11529 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11530 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11531 			goto setled;
   11532 		}
   11533 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11534 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11535 		DPRINTF(WM_DEBUG_LINK,
   11536 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11537 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11538 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11539 			mii->mii_media_active |= IFM_FLOW
   11540 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11541 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11542 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11543 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11544 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11545 			mii->mii_media_active |= IFM_FLOW
   11546 			    | IFM_ETH_TXPAUSE;
   11547 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11548 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11549 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11550 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11551 			mii->mii_media_active |= IFM_FLOW
   11552 			    | IFM_ETH_RXPAUSE;
   11553 		}
   11554 	}
   11555 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11556 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11557 setled:
   11558 	wm_tbi_serdes_set_linkled(sc);
   11559 }
   11560 
   11561 /*
   11562  * wm_serdes_tick:
   11563  *
   11564  *	Check the link on serdes devices.
   11565  */
   11566 static void
   11567 wm_serdes_tick(struct wm_softc *sc)
   11568 {
   11569 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11570 	struct mii_data *mii = &sc->sc_mii;
   11571 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11572 	uint32_t reg;
   11573 
   11574 	KASSERT(WM_CORE_LOCKED(sc));
   11575 
   11576 	mii->mii_media_status = IFM_AVALID;
   11577 	mii->mii_media_active = IFM_ETHER;
   11578 
   11579 	/* Check PCS */
   11580 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11581 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11582 		mii->mii_media_status |= IFM_ACTIVE;
   11583 		sc->sc_tbi_linkup = 1;
   11584 		sc->sc_tbi_serdes_ticks = 0;
   11585 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11586 		if ((reg & PCS_LSTS_FDX) != 0)
   11587 			mii->mii_media_active |= IFM_FDX;
   11588 		else
   11589 			mii->mii_media_active |= IFM_HDX;
   11590 	} else {
   11591 		mii->mii_media_status |= IFM_NONE;
   11592 		sc->sc_tbi_linkup = 0;
   11593 		/* If the timer expired, retry autonegotiation */
   11594 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11595 		    && (++sc->sc_tbi_serdes_ticks
   11596 			>= sc->sc_tbi_serdes_anegticks)) {
   11597 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11598 			sc->sc_tbi_serdes_ticks = 0;
   11599 			/* XXX */
   11600 			wm_serdes_mediachange(ifp);
   11601 		}
   11602 	}
   11603 
   11604 	wm_tbi_serdes_set_linkled(sc);
   11605 }
   11606 
   11607 /* SFP related */
   11608 
   11609 static int
   11610 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11611 {
   11612 	uint32_t i2ccmd;
   11613 	int i;
   11614 
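          	/* Kick off the SFP read via the MAC's built-in I2C controller */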
   11615 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11616 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11617 
   11618 	/* Poll the ready bit */
   11619 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11620 		delay(50);
   11621 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11622 		if (i2ccmd & I2CCMD_READY)
   11623 			break;
   11624 	}
   11625 	if ((i2ccmd & I2CCMD_READY) == 0)
   11626 		return -1;
   11627 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11628 		return -1;
   11629 
   11630 	*data = i2ccmd & 0x00ff;
   11631 
   11632 	return 0;
   11633 }
   11634 
   11635 static uint32_t
   11636 wm_sfp_get_media_type(struct wm_softc *sc)
   11637 {
   11638 	uint32_t ctrl_ext;
   11639 	uint8_t val = 0;
   11640 	int timeout = 3;
   11641 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11642 	int rv = -1;
   11643 
   11644 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11645 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11646 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11647 	CSR_WRITE_FLUSH(sc);
   11648 
   11649 	/* Read SFP module data */
   11650 	while (timeout) {
   11651 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11652 		if (rv == 0)
   11653 			break;
   11654 		delay(100*1000); /* XXX too big */
   11655 		timeout--;
   11656 	}
   11657 	if (rv != 0)
   11658 		goto out;
   11659 	switch (val) {
   11660 	case SFF_SFP_ID_SFF:
   11661 		aprint_normal_dev(sc->sc_dev,
   11662 		    "Module/Connector soldered to board\n");
   11663 		break;
   11664 	case SFF_SFP_ID_SFP:
   11665 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11666 		break;
   11667 	case SFF_SFP_ID_UNKNOWN:
   11668 		goto out;
   11669 	default:
   11670 		break;
   11671 	}
   11672 
   11673 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11674 	if (rv != 0) {
   11675 		goto out;
   11676 	}
   11677 
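          	/* Map the SFF Ethernet compliance code bits to a media type */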
   11678 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11679 		mediatype = WM_MEDIATYPE_SERDES;
   11680 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11681 		sc->sc_flags |= WM_F_SGMII;
   11682 		mediatype = WM_MEDIATYPE_COPPER;
   11683 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11684 		sc->sc_flags |= WM_F_SGMII;
   11685 		mediatype = WM_MEDIATYPE_SERDES;
   11686 	}
   11687 
   11688 out:
   11689 	/* Restore I2C interface setting */
   11690 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11691 
   11692 	return mediatype;
   11693 }
   11694 
   11695 /*
   11696  * NVM related.
   11697  * Microwire, SPI (w/wo EERD) and Flash.
   11698  */
   11699 
   11700 /* Both spi and uwire */
   11701 
   11702 /*
   11703  * wm_eeprom_sendbits:
   11704  *
   11705  *	Send a series of bits to the EEPROM.
   11706  */
   11707 static void
   11708 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11709 {
   11710 	uint32_t reg;
   11711 	int x;
   11712 
   11713 	reg = CSR_READ(sc, WMREG_EECD);
   11714 
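          	/*
          	 * Clock each bit out MSB first: present the bit on DI, then
          	 * pulse SK high and low.  The EEPROM samples DI on the rising
          	 * edge of SK.
          	 */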
   11715 	for (x = nbits; x > 0; x--) {
   11716 		if (bits & (1U << (x - 1)))
   11717 			reg |= EECD_DI;
   11718 		else
   11719 			reg &= ~EECD_DI;
   11720 		CSR_WRITE(sc, WMREG_EECD, reg);
   11721 		CSR_WRITE_FLUSH(sc);
   11722 		delay(2);
   11723 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11724 		CSR_WRITE_FLUSH(sc);
   11725 		delay(2);
   11726 		CSR_WRITE(sc, WMREG_EECD, reg);
   11727 		CSR_WRITE_FLUSH(sc);
   11728 		delay(2);
   11729 	}
   11730 }
   11731 
   11732 /*
   11733  * wm_eeprom_recvbits:
   11734  *
   11735  *	Receive a series of bits from the EEPROM.
   11736  */
   11737 static void
   11738 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11739 {
   11740 	uint32_t reg, val;
   11741 	int x;
   11742 
   11743 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11744 
   11745 	val = 0;
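          	/*
          	 * Clock each bit in MSB first: raise SK, sample DO while SK is
          	 * high, then lower SK.
          	 */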
   11746 	for (x = nbits; x > 0; x--) {
   11747 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11748 		CSR_WRITE_FLUSH(sc);
   11749 		delay(2);
   11750 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11751 			val |= (1U << (x - 1));
   11752 		CSR_WRITE(sc, WMREG_EECD, reg);
   11753 		CSR_WRITE_FLUSH(sc);
   11754 		delay(2);
   11755 	}
   11756 	*valp = val;
   11757 }
   11758 
   11759 /* Microwire */
   11760 
   11761 /*
   11762  * wm_nvm_read_uwire:
   11763  *
   11764  *	Read a word from the EEPROM using the MicroWire protocol.
   11765  */
   11766 static int
   11767 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11768 {
   11769 	uint32_t reg, val;
   11770 	int i;
   11771 
   11772 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11773 		device_xname(sc->sc_dev), __func__));
   11774 
   11775 	if (sc->nvm.acquire(sc) != 0)
   11776 		return -1;
   11777 
   11778 	for (i = 0; i < wordcnt; i++) {
   11779 		/* Clear SK and DI. */
   11780 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11781 		CSR_WRITE(sc, WMREG_EECD, reg);
   11782 
   11783 		/*
   11784 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11785 		 * and Xen.
   11786 		 *
   11787 		 * We use this workaround only for 82540 because qemu's
    11788 		 * e1000 acts as an 82540.
   11789 		 */
   11790 		if (sc->sc_type == WM_T_82540) {
   11791 			reg |= EECD_SK;
   11792 			CSR_WRITE(sc, WMREG_EECD, reg);
   11793 			reg &= ~EECD_SK;
   11794 			CSR_WRITE(sc, WMREG_EECD, reg);
   11795 			CSR_WRITE_FLUSH(sc);
   11796 			delay(2);
   11797 		}
   11798 		/* XXX: end of workaround */
   11799 
   11800 		/* Set CHIP SELECT. */
   11801 		reg |= EECD_CS;
   11802 		CSR_WRITE(sc, WMREG_EECD, reg);
   11803 		CSR_WRITE_FLUSH(sc);
   11804 		delay(2);
   11805 
   11806 		/* Shift in the READ command. */
   11807 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11808 
   11809 		/* Shift in address. */
   11810 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11811 
   11812 		/* Shift out the data. */
   11813 		wm_eeprom_recvbits(sc, &val, 16);
   11814 		data[i] = val & 0xffff;
   11815 
   11816 		/* Clear CHIP SELECT. */
   11817 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11818 		CSR_WRITE(sc, WMREG_EECD, reg);
   11819 		CSR_WRITE_FLUSH(sc);
   11820 		delay(2);
   11821 	}
   11822 
   11823 	sc->nvm.release(sc);
   11824 	return 0;
   11825 }
   11826 
   11827 /* SPI */
   11828 
   11829 /*
   11830  * Set SPI and FLASH related information from the EECD register.
   11831  * For 82541 and 82547, the word size is taken from EEPROM.
   11832  */
   11833 static int
   11834 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11835 {
   11836 	int size;
   11837 	uint32_t reg;
   11838 	uint16_t data;
   11839 
   11840 	reg = CSR_READ(sc, WMREG_EECD);
   11841 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11842 
   11843 	/* Read the size of NVM from EECD by default */
   11844 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11845 	switch (sc->sc_type) {
   11846 	case WM_T_82541:
   11847 	case WM_T_82541_2:
   11848 	case WM_T_82547:
   11849 	case WM_T_82547_2:
   11850 		/* Set dummy value to access EEPROM */
   11851 		sc->sc_nvm_wordsize = 64;
   11852 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11853 			aprint_error_dev(sc->sc_dev,
   11854 			    "%s: failed to read EEPROM size\n", __func__);
   11855 		}
   11856 		reg = data;
   11857 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11858 		if (size == 0)
    11859 			size = 6; /* 2^6 = 64 words */
   11860 		else
   11861 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11862 		break;
   11863 	case WM_T_80003:
   11864 	case WM_T_82571:
   11865 	case WM_T_82572:
   11866 	case WM_T_82573: /* SPI case */
   11867 	case WM_T_82574: /* SPI case */
   11868 	case WM_T_82583: /* SPI case */
   11869 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11870 		if (size > 14)
   11871 			size = 14;
   11872 		break;
   11873 	case WM_T_82575:
   11874 	case WM_T_82576:
   11875 	case WM_T_82580:
   11876 	case WM_T_I350:
   11877 	case WM_T_I354:
   11878 	case WM_T_I210:
   11879 	case WM_T_I211:
   11880 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11881 		if (size > 15)
   11882 			size = 15;
   11883 		break;
   11884 	default:
   11885 		aprint_error_dev(sc->sc_dev,
   11886 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11887 		return -1;
   11888 		break;
   11889 	}
   11890 
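          	/* 'size' is an exponent; the NVM holds 2^size 16-bit words */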
   11891 	sc->sc_nvm_wordsize = 1 << size;
   11892 
   11893 	return 0;
   11894 }
   11895 
   11896 /*
   11897  * wm_nvm_ready_spi:
   11898  *
   11899  *	Wait for a SPI EEPROM to be ready for commands.
   11900  */
   11901 static int
   11902 wm_nvm_ready_spi(struct wm_softc *sc)
   11903 {
   11904 	uint32_t val;
   11905 	int usec;
   11906 
   11907 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11908 		device_xname(sc->sc_dev), __func__));
   11909 
   11910 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11911 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11912 		wm_eeprom_recvbits(sc, &val, 8);
   11913 		if ((val & SPI_SR_RDY) == 0)
   11914 			break;
   11915 	}
   11916 	if (usec >= SPI_MAX_RETRIES) {
    11917 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11918 		return -1;
   11919 	}
   11920 	return 0;
   11921 }
   11922 
   11923 /*
   11924  * wm_nvm_read_spi:
   11925  *
    11926  *	Read a word from the EEPROM using the SPI protocol.
   11927  */
   11928 static int
   11929 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11930 {
   11931 	uint32_t reg, val;
   11932 	int i;
   11933 	uint8_t opc;
   11934 	int rv = 0;
   11935 
   11936 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11937 		device_xname(sc->sc_dev), __func__));
   11938 
   11939 	if (sc->nvm.acquire(sc) != 0)
   11940 		return -1;
   11941 
   11942 	/* Clear SK and CS. */
   11943 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11944 	CSR_WRITE(sc, WMREG_EECD, reg);
   11945 	CSR_WRITE_FLUSH(sc);
   11946 	delay(2);
   11947 
   11948 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11949 		goto out;
   11950 
   11951 	/* Toggle CS to flush commands. */
   11952 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11953 	CSR_WRITE_FLUSH(sc);
   11954 	delay(2);
   11955 	CSR_WRITE(sc, WMREG_EECD, reg);
   11956 	CSR_WRITE_FLUSH(sc);
   11957 	delay(2);
   11958 
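          	/*
          	 * SPI parts with 8-bit addressing carry the ninth address bit
          	 * (A8) in the READ opcode.
          	 */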
   11959 	opc = SPI_OPC_READ;
   11960 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11961 		opc |= SPI_OPC_A8;
   11962 
   11963 	wm_eeprom_sendbits(sc, opc, 8);
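          	/* SPI EEPROMs are byte addressed; send the byte address (word << 1) */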
   11964 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11965 
   11966 	for (i = 0; i < wordcnt; i++) {
   11967 		wm_eeprom_recvbits(sc, &val, 16);
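          		/* The low-addressed byte arrives first; swap to host word order */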
   11968 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11969 	}
   11970 
   11971 	/* Raise CS and clear SK. */
   11972 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11973 	CSR_WRITE(sc, WMREG_EECD, reg);
   11974 	CSR_WRITE_FLUSH(sc);
   11975 	delay(2);
   11976 
   11977 out:
   11978 	sc->nvm.release(sc);
   11979 	return rv;
   11980 }
   11981 
    11982 /* Reading via EERD */
   11983 
   11984 static int
   11985 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11986 {
   11987 	uint32_t attempts = 100000;
   11988 	uint32_t i, reg = 0;
   11989 	int32_t done = -1;
   11990 
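          	/* Poll the DONE bit in the register given by 'rw' (EERD or EEWR) */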
   11991 	for (i = 0; i < attempts; i++) {
   11992 		reg = CSR_READ(sc, rw);
   11993 
   11994 		if (reg & EERD_DONE) {
   11995 			done = 0;
   11996 			break;
   11997 		}
   11998 		delay(5);
   11999 	}
   12000 
   12001 	return done;
   12002 }
   12003 
   12004 static int
   12005 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12006 {
   12007 	int i, eerd = 0;
   12008 	int rv = 0;
   12009 
   12010 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12011 		device_xname(sc->sc_dev), __func__));
   12012 
   12013 	if (sc->nvm.acquire(sc) != 0)
   12014 		return -1;
   12015 
   12016 	for (i = 0; i < wordcnt; i++) {
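          		/* Start a one-word read, then wait for it to complete */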
   12017 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12018 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12019 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12020 		if (rv != 0) {
   12021 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   12022 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   12023 			break;
   12024 		}
   12025 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12026 	}
   12027 
   12028 	sc->nvm.release(sc);
   12029 	return rv;
   12030 }
   12031 
   12032 /* Flash */
   12033 
   12034 static int
   12035 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12036 {
   12037 	uint32_t eecd;
   12038 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12039 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12040 	uint32_t nvm_dword = 0;
   12041 	uint8_t sig_byte = 0;
   12042 	int rv;
   12043 
   12044 	switch (sc->sc_type) {
   12045 	case WM_T_PCH_SPT:
   12046 	case WM_T_PCH_CNP:
   12047 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12048 		act_offset = ICH_NVM_SIG_WORD * 2;
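          		/*
          		 * SPT/CNP flash must be read as dwords; the signature
          		 * byte is taken from bits 15:8 of the dword read below.
          		 */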
   12049 
    12050 		/* Set bank to 0 in case the flash read fails. */
   12051 		*bank = 0;
   12052 
   12053 		/* Check bank 0 */
   12054 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12055 		if (rv != 0)
   12056 			return rv;
   12057 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12058 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12059 			*bank = 0;
   12060 			return 0;
   12061 		}
   12062 
   12063 		/* Check bank 1 */
   12064 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   12065 		    &nvm_dword);
   12066 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12067 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12068 			*bank = 1;
   12069 			return 0;
   12070 		}
   12071 		aprint_error_dev(sc->sc_dev,
   12072 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12073 		return -1;
   12074 	case WM_T_ICH8:
   12075 	case WM_T_ICH9:
   12076 		eecd = CSR_READ(sc, WMREG_EECD);
   12077 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12078 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12079 			return 0;
   12080 		}
   12081 		/* FALLTHROUGH */
   12082 	default:
   12083 		/* Default to 0 */
   12084 		*bank = 0;
   12085 
   12086 		/* Check bank 0 */
   12087 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12088 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12089 			*bank = 0;
   12090 			return 0;
   12091 		}
   12092 
   12093 		/* Check bank 1 */
   12094 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12095 		    &sig_byte);
   12096 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12097 			*bank = 1;
   12098 			return 0;
   12099 		}
   12100 	}
   12101 
   12102 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12103 		device_xname(sc->sc_dev)));
   12104 	return -1;
   12105 }
   12106 
   12107 /******************************************************************************
   12108  * This function does initial flash setup so that a new read/write/erase cycle
   12109  * can be started.
   12110  *
   12111  * sc - The pointer to the hw structure
   12112  ****************************************************************************/
   12113 static int32_t
   12114 wm_ich8_cycle_init(struct wm_softc *sc)
   12115 {
   12116 	uint16_t hsfsts;
   12117 	int32_t error = 1;
   12118 	int32_t i     = 0;
   12119 
   12120 	if (sc->sc_type >= WM_T_PCH_SPT)
   12121 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12122 	else
   12123 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12124 
    12125 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   12126 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   12127 		return error;
   12128 	}
   12129 
    12130 	/* Clear FCERR in HW status by writing a 1 */
    12131 	/* Clear DAEL in HW status by writing a 1 */
   12132 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12133 
   12134 	if (sc->sc_type >= WM_T_PCH_SPT)
   12135 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12136 	else
   12137 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12138 
   12139 	/*
    12140 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12141 	 * check against before starting a new cycle, or the FDONE bit
    12142 	 * should be changed in the hardware so that it is 1 after hardware
    12143 	 * reset, which could then indicate whether a cycle is in progress
    12144 	 * or has completed.  We should also have some software semaphore
    12145 	 * mechanism to guard FDONE or the cycle-in-progress bit so that
    12146 	 * accesses to those bits by two threads are serialized, or some
    12147 	 * way to keep two threads from starting a cycle at the same time.
   12148 	 */
   12149 
   12150 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12151 		/*
   12152 		 * There is no cycle running at present, so we can start a
   12153 		 * cycle
   12154 		 */
   12155 
   12156 		/* Begin by setting Flash Cycle Done. */
   12157 		hsfsts |= HSFSTS_DONE;
   12158 		if (sc->sc_type >= WM_T_PCH_SPT)
   12159 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12160 			    hsfsts & 0xffffUL);
   12161 		else
   12162 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12163 		error = 0;
   12164 	} else {
   12165 		/*
    12166 		 * Otherwise poll for some time so the current cycle has a
   12167 		 * chance to end before giving up.
   12168 		 */
   12169 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12170 			if (sc->sc_type >= WM_T_PCH_SPT)
   12171 				hsfsts = ICH8_FLASH_READ32(sc,
   12172 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12173 			else
   12174 				hsfsts = ICH8_FLASH_READ16(sc,
   12175 				    ICH_FLASH_HSFSTS);
   12176 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12177 				error = 0;
   12178 				break;
   12179 			}
   12180 			delay(1);
   12181 		}
   12182 		if (error == 0) {
   12183 			/*
    12184 			 * The previous cycle finished before the timeout;
    12185 			 * now set the Flash Cycle Done bit.
   12186 			 */
   12187 			hsfsts |= HSFSTS_DONE;
   12188 			if (sc->sc_type >= WM_T_PCH_SPT)
   12189 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12190 				    hsfsts & 0xffffUL);
   12191 			else
   12192 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12193 				    hsfsts);
   12194 		}
   12195 	}
   12196 	return error;
   12197 }
   12198 
   12199 /******************************************************************************
   12200  * This function starts a flash cycle and waits for its completion
   12201  *
   12202  * sc - The pointer to the hw structure
   12203  ****************************************************************************/
   12204 static int32_t
   12205 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12206 {
   12207 	uint16_t hsflctl;
   12208 	uint16_t hsfsts;
   12209 	int32_t error = 1;
   12210 	uint32_t i = 0;
   12211 
   12212 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12213 	if (sc->sc_type >= WM_T_PCH_SPT)
   12214 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12215 	else
   12216 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12217 	hsflctl |= HSFCTL_GO;
   12218 	if (sc->sc_type >= WM_T_PCH_SPT)
   12219 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12220 		    (uint32_t)hsflctl << 16);
   12221 	else
   12222 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12223 
   12224 	/* Wait till FDONE bit is set to 1 */
   12225 	do {
   12226 		if (sc->sc_type >= WM_T_PCH_SPT)
   12227 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12228 			    & 0xffffUL;
   12229 		else
   12230 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12231 		if (hsfsts & HSFSTS_DONE)
   12232 			break;
   12233 		delay(1);
   12234 		i++;
   12235 	} while (i < timeout);
   12236 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   12237 		error = 0;
   12238 
   12239 	return error;
   12240 }
   12241 
   12242 /******************************************************************************
   12243  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12244  *
   12245  * sc - The pointer to the hw structure
   12246  * index - The index of the byte or word to read.
   12247  * size - Size of data to read, 1=byte 2=word, 4=dword
   12248  * data - Pointer to the word to store the value read.
   12249  *****************************************************************************/
   12250 static int32_t
   12251 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12252     uint32_t size, uint32_t *data)
   12253 {
   12254 	uint16_t hsfsts;
   12255 	uint16_t hsflctl;
   12256 	uint32_t flash_linear_address;
   12257 	uint32_t flash_data = 0;
   12258 	int32_t error = 1;
   12259 	int32_t count = 0;
   12260 
    12261 	if (size < 1 || size > 4 || data == NULL ||
   12262 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12263 		return error;
   12264 
   12265 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12266 	    sc->sc_ich8_flash_base;
   12267 
   12268 	do {
   12269 		delay(1);
   12270 		/* Steps */
   12271 		error = wm_ich8_cycle_init(sc);
   12272 		if (error)
   12273 			break;
   12274 
   12275 		if (sc->sc_type >= WM_T_PCH_SPT)
   12276 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12277 			    >> 16;
   12278 		else
   12279 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12280 		/* Set BCOUNT to size - 1: 0, 1 or 3 for 1/2/4 byte access */
   12281 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12282 		    & HSFCTL_BCOUNT_MASK;
   12283 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12284 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12285 			/*
    12286 			 * In SPT, this register is in LAN memory space, not
    12287 			 * flash.  Therefore, only 32-bit access is supported.
   12288 			 */
   12289 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12290 			    (uint32_t)hsflctl << 16);
   12291 		} else
   12292 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12293 
   12294 		/*
   12295 		 * Write the last 24 bits of index into Flash Linear address
   12296 		 * field in Flash Address
   12297 		 */
   12298 		/* TODO: TBD maybe check the index against the size of flash */
   12299 
   12300 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12301 
   12302 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12303 
   12304 		/*
    12305 		 * If the cycle completed without error, read the data out
    12306 		 * of Flash Data0, least significant byte first.  If FCERR
    12307 		 * is set, clear it and retry the whole sequence up to
    12308 		 * ICH_FLASH_CYCLE_REPEAT_COUNT times before giving up.
   12309 		 */
   12310 		if (error == 0) {
   12311 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12312 			if (size == 1)
   12313 				*data = (uint8_t)(flash_data & 0x000000FF);
   12314 			else if (size == 2)
   12315 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12316 			else if (size == 4)
   12317 				*data = (uint32_t)flash_data;
   12318 			break;
   12319 		} else {
   12320 			/*
   12321 			 * If we've gotten here, then things are probably
   12322 			 * completely hosed, but if the error condition is
   12323 			 * detected, it won't hurt to give it another try...
   12324 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12325 			 */
   12326 			if (sc->sc_type >= WM_T_PCH_SPT)
   12327 				hsfsts = ICH8_FLASH_READ32(sc,
   12328 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12329 			else
   12330 				hsfsts = ICH8_FLASH_READ16(sc,
   12331 				    ICH_FLASH_HSFSTS);
   12332 
   12333 			if (hsfsts & HSFSTS_ERR) {
   12334 				/* Repeat for some time before giving up. */
   12335 				continue;
   12336 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12337 				break;
   12338 		}
   12339 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12340 
   12341 	return error;
   12342 }
   12343 
   12344 /******************************************************************************
   12345  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12346  *
   12347  * sc - pointer to wm_hw structure
   12348  * index - The index of the byte to read.
   12349  * data - Pointer to a byte to store the value read.
   12350  *****************************************************************************/
   12351 static int32_t
   12352 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12353 {
   12354 	int32_t status;
   12355 	uint32_t word = 0;
   12356 
   12357 	status = wm_read_ich8_data(sc, index, 1, &word);
   12358 	if (status == 0)
   12359 		*data = (uint8_t)word;
   12360 	else
   12361 		*data = 0;
   12362 
   12363 	return status;
   12364 }
   12365 
   12366 /******************************************************************************
   12367  * Reads a word from the NVM using the ICH8 flash access registers.
   12368  *
   12369  * sc - pointer to wm_hw structure
   12370  * index - The starting byte index of the word to read.
   12371  * data - Pointer to a word to store the value read.
   12372  *****************************************************************************/
   12373 static int32_t
   12374 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12375 {
   12376 	int32_t status;
   12377 	uint32_t word = 0;
   12378 
   12379 	status = wm_read_ich8_data(sc, index, 2, &word);
   12380 	if (status == 0)
   12381 		*data = (uint16_t)word;
   12382 	else
   12383 		*data = 0;
   12384 
   12385 	return status;
   12386 }
   12387 
   12388 /******************************************************************************
   12389  * Reads a dword from the NVM using the ICH8 flash access registers.
   12390  *
   12391  * sc - pointer to wm_hw structure
    12392  * index - The starting byte index of the dword to read.
    12393  * data - Pointer to a dword to store the value read.
   12394  *****************************************************************************/
   12395 static int32_t
   12396 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12397 {
   12398 	int32_t status;
   12399 
   12400 	status = wm_read_ich8_data(sc, index, 4, data);
   12401 	return status;
   12402 }
   12403 
   12404 /******************************************************************************
   12405  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12406  * register.
   12407  *
   12408  * sc - Struct containing variables accessed by shared code
   12409  * offset - offset of word in the EEPROM to read
   12410  * data - word read from the EEPROM
   12411  * words - number of words to read
   12412  *****************************************************************************/
   12413 static int
   12414 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12415 {
   12416 	int32_t	 rv = 0;
   12417 	uint32_t flash_bank = 0;
   12418 	uint32_t act_offset = 0;
   12419 	uint32_t bank_offset = 0;
   12420 	uint16_t word = 0;
   12421 	uint16_t i = 0;
   12422 
   12423 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12424 		device_xname(sc->sc_dev), __func__));
   12425 
   12426 	if (sc->nvm.acquire(sc) != 0)
   12427 		return -1;
   12428 
   12429 	/*
   12430 	 * We need to know which is the valid flash bank.  In the event
   12431 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12432 	 * managing flash_bank. So it cannot be trusted and needs
   12433 	 * to be updated with each read.
   12434 	 */
   12435 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12436 	if (rv) {
   12437 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12438 			device_xname(sc->sc_dev)));
   12439 		flash_bank = 0;
   12440 	}
   12441 
   12442 	/*
   12443 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12444 	 * size
   12445 	 */
   12446 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12447 
   12448 	for (i = 0; i < words; i++) {
   12449 		/* The NVM part needs a byte offset, hence * 2 */
   12450 		act_offset = bank_offset + ((offset + i) * 2);
   12451 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12452 		if (rv) {
   12453 			aprint_error_dev(sc->sc_dev,
   12454 			    "%s: failed to read NVM\n", __func__);
   12455 			break;
   12456 		}
   12457 		data[i] = word;
   12458 	}
   12459 
   12460 	sc->nvm.release(sc);
   12461 	return rv;
   12462 }
   12463 
   12464 /******************************************************************************
   12465  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12466  * register.
   12467  *
   12468  * sc - Struct containing variables accessed by shared code
   12469  * offset - offset of word in the EEPROM to read
   12470  * data - word read from the EEPROM
   12471  * words - number of words to read
   12472  *****************************************************************************/
   12473 static int
   12474 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12475 {
   12476 	int32_t	 rv = 0;
   12477 	uint32_t flash_bank = 0;
   12478 	uint32_t act_offset = 0;
   12479 	uint32_t bank_offset = 0;
   12480 	uint32_t dword = 0;
   12481 	uint16_t i = 0;
   12482 
   12483 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12484 		device_xname(sc->sc_dev), __func__));
   12485 
   12486 	if (sc->nvm.acquire(sc) != 0)
   12487 		return -1;
   12488 
   12489 	/*
   12490 	 * We need to know which is the valid flash bank.  In the event
   12491 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12492 	 * managing flash_bank. So it cannot be trusted and needs
   12493 	 * to be updated with each read.
   12494 	 */
   12495 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12496 	if (rv) {
   12497 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12498 			device_xname(sc->sc_dev)));
   12499 		flash_bank = 0;
   12500 	}
   12501 
   12502 	/*
   12503 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12504 	 * size
   12505 	 */
   12506 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12507 
   12508 	for (i = 0; i < words; i++) {
   12509 		/* The NVM part needs a byte offset, hence * 2 */
   12510 		act_offset = bank_offset + ((offset + i) * 2);
   12511 		/* but we must read dword aligned, so mask ... */
   12512 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12513 		if (rv) {
   12514 			aprint_error_dev(sc->sc_dev,
   12515 			    "%s: failed to read NVM\n", __func__);
   12516 			break;
   12517 		}
   12518 		/* ... and pick out low or high word */
   12519 		if ((act_offset & 0x2) == 0)
   12520 			data[i] = (uint16_t)(dword & 0xFFFF);
   12521 		else
   12522 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12523 	}
   12524 
   12525 	sc->nvm.release(sc);
   12526 	return rv;
   12527 }
   12528 
   12529 /* iNVM */
   12530 
   12531 static int
   12532 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12533 {
   12534 	int32_t	 rv = 0;
   12535 	uint32_t invm_dword;
   12536 	uint16_t i;
   12537 	uint8_t record_type, word_address;
   12538 
   12539 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12540 		device_xname(sc->sc_dev), __func__));
   12541 
   12542 	for (i = 0; i < INVM_SIZE; i++) {
   12543 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12544 		/* Get record type */
   12545 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12546 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12547 			break;
   12548 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12549 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12550 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12551 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12552 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12553 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12554 			if (word_address == address) {
   12555 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12556 				rv = 0;
   12557 				break;
   12558 			}
   12559 		}
   12560 	}
   12561 
   12562 	return rv;
   12563 }
   12564 
   12565 static int
   12566 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12567 {
   12568 	int rv = 0;
   12569 	int i;
   12570 
   12571 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12572 		device_xname(sc->sc_dev), __func__));
   12573 
   12574 	if (sc->nvm.acquire(sc) != 0)
   12575 		return -1;
   12576 
   12577 	for (i = 0; i < words; i++) {
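          		/*
          		 * The iNVM autoloads only a few words; for known words
          		 * that are absent, fall back to the documented I211
          		 * default values.
          		 */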
   12578 		switch (offset + i) {
   12579 		case NVM_OFF_MACADDR:
   12580 		case NVM_OFF_MACADDR1:
   12581 		case NVM_OFF_MACADDR2:
   12582 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12583 			if (rv != 0) {
   12584 				data[i] = 0xffff;
   12585 				rv = -1;
   12586 			}
   12587 			break;
   12588 		case NVM_OFF_CFG2:
   12589 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12590 			if (rv != 0) {
   12591 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12592 				rv = 0;
   12593 			}
   12594 			break;
   12595 		case NVM_OFF_CFG4:
   12596 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12597 			if (rv != 0) {
   12598 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12599 				rv = 0;
   12600 			}
   12601 			break;
   12602 		case NVM_OFF_LED_1_CFG:
   12603 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12604 			if (rv != 0) {
   12605 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12606 				rv = 0;
   12607 			}
   12608 			break;
   12609 		case NVM_OFF_LED_0_2_CFG:
   12610 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12611 			if (rv != 0) {
   12612 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12613 				rv = 0;
   12614 			}
   12615 			break;
   12616 		case NVM_OFF_ID_LED_SETTINGS:
   12617 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12618 			if (rv != 0) {
   12619 				*data = ID_LED_RESERVED_FFFF;
   12620 				rv = 0;
   12621 			}
   12622 			break;
   12623 		default:
   12624 			DPRINTF(WM_DEBUG_NVM,
   12625 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12626 			*data = NVM_RESERVED_WORD;
   12627 			break;
   12628 		}
   12629 	}
   12630 
   12631 	sc->nvm.release(sc);
   12632 	return rv;
   12633 }
   12634 
    12635 /* Locking, NVM type detection, checksum validation, version and read */
   12636 
   12637 static int
   12638 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12639 {
   12640 	uint32_t eecd = 0;
   12641 
   12642 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12643 	    || sc->sc_type == WM_T_82583) {
   12644 		eecd = CSR_READ(sc, WMREG_EECD);
   12645 
   12646 		/* Isolate bits 15 & 16 */
   12647 		eecd = ((eecd >> 15) & 0x03);
   12648 
   12649 		/* If both bits are set, device is Flash type */
   12650 		if (eecd == 0x03)
   12651 			return 0;
   12652 	}
   12653 	return 1;
   12654 }
   12655 
   12656 static int
   12657 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12658 {
   12659 	uint32_t eec;
   12660 
   12661 	eec = CSR_READ(sc, WMREG_EEC);
   12662 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12663 		return 1;
   12664 
   12665 	return 0;
   12666 }
   12667 
   12668 /*
   12669  * wm_nvm_validate_checksum
   12670  *
   12671  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12672  */
   12673 static int
   12674 wm_nvm_validate_checksum(struct wm_softc *sc)
   12675 {
   12676 	uint16_t checksum;
   12677 	uint16_t eeprom_data;
   12678 #ifdef WM_DEBUG
   12679 	uint16_t csum_wordaddr, valid_checksum;
   12680 #endif
   12681 	int i;
   12682 
   12683 	checksum = 0;
   12684 
   12685 	/* Don't check for I211 */
   12686 	if (sc->sc_type == WM_T_I211)
   12687 		return 0;
   12688 
   12689 #ifdef WM_DEBUG
   12690 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12691 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12692 		csum_wordaddr = NVM_OFF_COMPAT;
   12693 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12694 	} else {
   12695 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12696 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12697 	}
   12698 
   12699 	/* Dump EEPROM image for debug */
   12700 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12701 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12702 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12703 		/* XXX PCH_SPT? */
   12704 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12705 		if ((eeprom_data & valid_checksum) == 0) {
   12706 			DPRINTF(WM_DEBUG_NVM,
   12707 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12708 				device_xname(sc->sc_dev), eeprom_data,
   12709 				    valid_checksum));
   12710 		}
   12711 	}
   12712 
   12713 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12714 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12715 		for (i = 0; i < NVM_SIZE; i++) {
   12716 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12717 				printf("XXXX ");
   12718 			else
   12719 				printf("%04hx ", eeprom_data);
   12720 			if (i % 8 == 7)
   12721 				printf("\n");
   12722 		}
   12723 	}
   12724 
   12725 #endif /* WM_DEBUG */
   12726 
   12727 	for (i = 0; i < NVM_SIZE; i++) {
   12728 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12729 			return 1;
   12730 		checksum += eeprom_data;
   12731 	}
   12732 
   12733 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12734 #ifdef WM_DEBUG
   12735 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12736 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12737 #endif
   12738 	}
   12739 
   12740 	return 0;
   12741 }
   12742 
   12743 static void
   12744 wm_nvm_version_invm(struct wm_softc *sc)
   12745 {
   12746 	uint32_t dword;
   12747 
   12748 	/*
    12749 	 * Linux's code to decode the version is very strange, so we don't
    12750 	 * follow that algorithm and just use word 61 as the document
    12751 	 * describes.  Perhaps it's not perfect, though...
   12752 	 *
   12753 	 * Example:
   12754 	 *
   12755 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12756 	 */
   12757 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12758 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12759 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12760 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12761 }
   12762 
   12763 static void
   12764 wm_nvm_version(struct wm_softc *sc)
   12765 {
   12766 	uint16_t major, minor, build, patch;
   12767 	uint16_t uid0, uid1;
   12768 	uint16_t nvm_data;
   12769 	uint16_t off;
   12770 	bool check_version = false;
   12771 	bool check_optionrom = false;
   12772 	bool have_build = false;
   12773 	bool have_uid = true;
   12774 
   12775 	/*
   12776 	 * Version format:
   12777 	 *
   12778 	 * XYYZ
   12779 	 * X0YZ
   12780 	 * X0YY
   12781 	 *
   12782 	 * Example:
   12783 	 *
   12784 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12785 	 *	82571	0x50a6	5.10.6?
   12786 	 *	82572	0x506a	5.6.10?
   12787 	 *	82572EI	0x5069	5.6.9?
   12788 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12789 	 *		0x2013	2.1.3?
    12790 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12791 	 */
   12792 
   12793 	/*
   12794 	 * XXX
   12795 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
    12796 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12797 	 */
   12798 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12799 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12800 		have_uid = false;
   12801 
   12802 	switch (sc->sc_type) {
   12803 	case WM_T_82571:
   12804 	case WM_T_82572:
   12805 	case WM_T_82574:
   12806 	case WM_T_82583:
   12807 		check_version = true;
   12808 		check_optionrom = true;
   12809 		have_build = true;
   12810 		break;
   12811 	case WM_T_82575:
   12812 	case WM_T_82576:
   12813 	case WM_T_82580:
   12814 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12815 			check_version = true;
   12816 		break;
   12817 	case WM_T_I211:
   12818 		wm_nvm_version_invm(sc);
   12819 		have_uid = false;
   12820 		goto printver;
   12821 	case WM_T_I210:
   12822 		if (!wm_nvm_flash_presence_i210(sc)) {
   12823 			wm_nvm_version_invm(sc);
   12824 			have_uid = false;
   12825 			goto printver;
   12826 		}
   12827 		/* FALLTHROUGH */
   12828 	case WM_T_I350:
   12829 	case WM_T_I354:
   12830 		check_version = true;
   12831 		check_optionrom = true;
   12832 		break;
   12833 	default:
   12834 		return;
   12835 	}
   12836 	if (check_version
   12837 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12838 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12839 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12840 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12841 			build = nvm_data & NVM_BUILD_MASK;
   12842 			have_build = true;
   12843 		} else
   12844 			minor = nvm_data & 0x00ff;
   12845 
   12846 		/* Decimal */
   12847 		minor = (minor / 16) * 10 + (minor % 16);
   12848 		sc->sc_nvm_ver_major = major;
   12849 		sc->sc_nvm_ver_minor = minor;
   12850 
   12851 printver:
   12852 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12853 		    sc->sc_nvm_ver_minor);
   12854 		if (have_build) {
   12855 			sc->sc_nvm_ver_build = build;
   12856 			aprint_verbose(".%d", build);
   12857 		}
   12858 	}
   12859 
    12860 	/* Assume the option ROM area is above NVM_SIZE */
   12861 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12862 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12863 		/* Option ROM Version */
   12864 		if ((off != 0x0000) && (off != 0xffff)) {
   12865 			int rv;
   12866 
   12867 			off += NVM_COMBO_VER_OFF;
   12868 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12869 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12870 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12871 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12872 				/* 16bits */
   12873 				major = uid0 >> 8;
   12874 				build = (uid0 << 8) | (uid1 >> 8);
   12875 				patch = uid1 & 0x00ff;
   12876 				aprint_verbose(", option ROM Version %d.%d.%d",
   12877 				    major, build, patch);
   12878 			}
   12879 		}
   12880 	}
   12881 
   12882 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12883 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12884 }
   12885 
   12886 /*
   12887  * wm_nvm_read:
   12888  *
   12889  *	Read data from the serial EEPROM.
   12890  */
   12891 static int
   12892 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12893 {
   12894 	int rv;
   12895 
   12896 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12897 		device_xname(sc->sc_dev), __func__));
   12898 
   12899 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12900 		return -1;
   12901 
   12902 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12903 
   12904 	return rv;
   12905 }
   12906 
   12907 /*
   12908  * Hardware semaphores.
    12909  * Very complex...
   12910  */
   12911 
   12912 static int
   12913 wm_get_null(struct wm_softc *sc)
   12914 {
   12915 
   12916 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12917 		device_xname(sc->sc_dev), __func__));
   12918 	return 0;
   12919 }
   12920 
   12921 static void
   12922 wm_put_null(struct wm_softc *sc)
   12923 {
   12924 
   12925 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12926 		device_xname(sc->sc_dev), __func__));
   12927 	return;
   12928 }
   12929 
   12930 static int
   12931 wm_get_eecd(struct wm_softc *sc)
   12932 {
   12933 	uint32_t reg;
   12934 	int x;
   12935 
   12936 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12937 		device_xname(sc->sc_dev), __func__));
   12938 
   12939 	reg = CSR_READ(sc, WMREG_EECD);
   12940 
   12941 	/* Request EEPROM access. */
   12942 	reg |= EECD_EE_REQ;
   12943 	CSR_WRITE(sc, WMREG_EECD, reg);
   12944 
    12945 	/* ... and wait for it to be granted. */
   12946 	for (x = 0; x < 1000; x++) {
   12947 		reg = CSR_READ(sc, WMREG_EECD);
   12948 		if (reg & EECD_EE_GNT)
   12949 			break;
   12950 		delay(5);
   12951 	}
   12952 	if ((reg & EECD_EE_GNT) == 0) {
   12953 		aprint_error_dev(sc->sc_dev,
   12954 		    "could not acquire EEPROM GNT\n");
   12955 		reg &= ~EECD_EE_REQ;
   12956 		CSR_WRITE(sc, WMREG_EECD, reg);
   12957 		return -1;
   12958 	}
   12959 
   12960 	return 0;
   12961 }
   12962 
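          /*
           * wm_nvm_eec_clock_raise / wm_nvm_eec_clock_lower:
           *
           *	Bit-bang the EEPROM serial clock (SK).  SPI parts need only
           *	a 1us half-period; Microwire parts need 50us.
           */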
   12963 static void
   12964 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12965 {
   12966 
   12967 	*eecd |= EECD_SK;
   12968 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12969 	CSR_WRITE_FLUSH(sc);
   12970 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12971 		delay(1);
   12972 	else
   12973 		delay(50);
   12974 }
   12975 
   12976 static void
   12977 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12978 {
   12979 
   12980 	*eecd &= ~EECD_SK;
   12981 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12982 	CSR_WRITE_FLUSH(sc);
   12983 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12984 		delay(1);
   12985 	else
   12986 		delay(50);
   12987 }
   12988 
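          /*
           * wm_put_eecd:
           *
           *	Deselect the EEPROM and release the EECD access request.
           */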
   12989 static void
   12990 wm_put_eecd(struct wm_softc *sc)
   12991 {
   12992 	uint32_t reg;
   12993 
   12994 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12995 		device_xname(sc->sc_dev), __func__));
   12996 
   12997 	/* Stop nvm */
   12998 	reg = CSR_READ(sc, WMREG_EECD);
   12999 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13000 		/* Pull CS high */
   13001 		reg |= EECD_CS;
   13002 		wm_nvm_eec_clock_lower(sc, &reg);
   13003 	} else {
   13004 		/* CS on Microwire is active-high */
   13005 		reg &= ~(EECD_CS | EECD_DI);
   13006 		CSR_WRITE(sc, WMREG_EECD, reg);
   13007 		wm_nvm_eec_clock_raise(sc, &reg);
   13008 		wm_nvm_eec_clock_lower(sc, &reg);
   13009 	}
   13010 
   13011 	reg = CSR_READ(sc, WMREG_EECD);
   13012 	reg &= ~EECD_EE_REQ;
   13013 	CSR_WRITE(sc, WMREG_EECD, reg);
   13014 
   13015 	return;
   13016 }
   13017 
   13018 /*
   13019  * Get hardware semaphore.
   13020  * Same as e1000_get_hw_semaphore_generic()
   13021  */
   13022 static int
   13023 wm_get_swsm_semaphore(struct wm_softc *sc)
   13024 {
   13025 	int32_t timeout;
   13026 	uint32_t swsm;
   13027 
   13028 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13029 		device_xname(sc->sc_dev), __func__));
   13030 	KASSERT(sc->sc_nvm_wordsize > 0);
   13031 
   13032 retry:
   13033 	/* Get the SW semaphore. */
   13034 	timeout = sc->sc_nvm_wordsize + 1;
   13035 	while (timeout) {
   13036 		swsm = CSR_READ(sc, WMREG_SWSM);
   13037 
   13038 		if ((swsm & SWSM_SMBI) == 0)
   13039 			break;
   13040 
   13041 		delay(50);
   13042 		timeout--;
   13043 	}
   13044 
   13045 	if (timeout == 0) {
   13046 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13047 			/*
   13048 			 * In rare circumstances, the SW semaphore may already
   13049 			 * be held unintentionally. Clear the semaphore once
   13050 			 * before giving up.
   13051 			 */
   13052 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13053 			wm_put_swsm_semaphore(sc);
   13054 			goto retry;
   13055 		}
   13056 		aprint_error_dev(sc->sc_dev,
   13057 		    "could not acquire SWSM SMBI\n");
   13058 		return 1;
   13059 	}
   13060 
   13061 	/* Get the FW semaphore. */
   13062 	timeout = sc->sc_nvm_wordsize + 1;
   13063 	while (timeout) {
   13064 		swsm = CSR_READ(sc, WMREG_SWSM);
   13065 		swsm |= SWSM_SWESMBI;
   13066 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13067 		/* If we managed to set the bit we got the semaphore. */
   13068 		swsm = CSR_READ(sc, WMREG_SWSM);
   13069 		if (swsm & SWSM_SWESMBI)
   13070 			break;
   13071 
   13072 		delay(50);
   13073 		timeout--;
   13074 	}
   13075 
   13076 	if (timeout == 0) {
   13077 		aprint_error_dev(sc->sc_dev,
   13078 		    "could not acquire SWSM SWESMBI\n");
   13079 		/* Release semaphores */
   13080 		wm_put_swsm_semaphore(sc);
   13081 		return 1;
   13082 	}
   13083 	return 0;
   13084 }
   13085 
   13086 /*
   13087  * Put hardware semaphore.
   13088  * Same as e1000_put_hw_semaphore_generic()
   13089  */
   13090 static void
   13091 wm_put_swsm_semaphore(struct wm_softc *sc)
   13092 {
   13093 	uint32_t swsm;
   13094 
   13095 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13096 		device_xname(sc->sc_dev), __func__));
   13097 
   13098 	swsm = CSR_READ(sc, WMREG_SWSM);
   13099 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13100 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13101 }
   13102 
   13103 /*
   13104  * Get SW/FW semaphore.
   13105  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13106  */
   13107 static int
   13108 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13109 {
   13110 	uint32_t swfw_sync;
   13111 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13112 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13113 	int timeout;
   13114 
   13115 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13116 		device_xname(sc->sc_dev), __func__));
   13117 
   13118 	if (sc->sc_type == WM_T_80003)
   13119 		timeout = 50;
   13120 	else
   13121 		timeout = 200;
   13122 
   13123 	while (timeout) {
   13124 		if (wm_get_swsm_semaphore(sc)) {
   13125 			aprint_error_dev(sc->sc_dev,
   13126 			    "%s: failed to get semaphore\n",
   13127 			    __func__);
   13128 			return 1;
   13129 		}
   13130 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13131 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13132 			swfw_sync |= swmask;
   13133 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13134 			wm_put_swsm_semaphore(sc);
   13135 			return 0;
   13136 		}
   13137 		wm_put_swsm_semaphore(sc);
   13138 		delay(5000);
   13139 		timeout--;
   13140 	}
   13141 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13142 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13143 	return 1;
   13144 }
   13145 
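          /*
           * Put SW/FW semaphore.
           * Counterpart of wm_get_swfw_semaphore().
           */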
   13146 static void
   13147 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13148 {
   13149 	uint32_t swfw_sync;
   13150 
   13151 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13152 		device_xname(sc->sc_dev), __func__));
   13153 
   13154 	while (wm_get_swsm_semaphore(sc) != 0)
   13155 		continue;
   13156 
   13157 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13158 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13159 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13160 
   13161 	wm_put_swsm_semaphore(sc);
   13162 }
   13163 
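          /*
           * Get NVM access on 80003-class chips: take the SWFW EEPROM
           * semaphore first, then direct EECD access where the chip
           * requires it.
           */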
   13164 static int
   13165 wm_get_nvm_80003(struct wm_softc *sc)
   13166 {
   13167 	int rv;
   13168 
   13169 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13170 		device_xname(sc->sc_dev), __func__));
   13171 
   13172 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13173 		aprint_error_dev(sc->sc_dev,
   13174 		    "%s: failed to get semaphore(SWFW)\n",
   13175 		    __func__);
   13176 		return rv;
   13177 	}
   13178 
   13179 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13180 	    && (rv = wm_get_eecd(sc)) != 0) {
   13181 		aprint_error_dev(sc->sc_dev,
   13182 		    "%s: failed to get semaphore(EECD)\n",
   13183 		    __func__);
   13184 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13185 		return rv;
   13186 	}
   13187 
   13188 	return 0;
   13189 }
   13190 
   13191 static void
   13192 wm_put_nvm_80003(struct wm_softc *sc)
   13193 {
   13194 
   13195 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13196 		device_xname(sc->sc_dev), __func__));
   13197 
   13198 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13199 		wm_put_eecd(sc);
   13200 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13201 }
   13202 
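          /*
           * Get NVM access on the 82571 family: take the SWSM semaphore
           * first, then (except on the 82573) EECD access where the chip
           * requires it.
           */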
   13203 static int
   13204 wm_get_nvm_82571(struct wm_softc *sc)
   13205 {
   13206 	int rv;
   13207 
   13208 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13209 		device_xname(sc->sc_dev), __func__));
   13210 
   13211 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13212 		return rv;
   13213 
   13214 	switch (sc->sc_type) {
   13215 	case WM_T_82573:
   13216 		break;
   13217 	default:
   13218 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13219 			rv = wm_get_eecd(sc);
   13220 		break;
   13221 	}
   13222 
   13223 	if (rv != 0) {
   13224 		aprint_error_dev(sc->sc_dev,
   13225 		    "%s: failed to get semaphore\n",
   13226 		    __func__);
   13227 		wm_put_swsm_semaphore(sc);
   13228 	}
   13229 
   13230 	return rv;
   13231 }
   13232 
   13233 static void
   13234 wm_put_nvm_82571(struct wm_softc *sc)
   13235 {
   13236 
   13237 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13238 		device_xname(sc->sc_dev), __func__));
   13239 
   13240 	switch (sc->sc_type) {
   13241 	case WM_T_82573:
   13242 		break;
   13243 	default:
   13244 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13245 			wm_put_eecd(sc);
   13246 		break;
   13247 	}
   13248 
   13249 	wm_put_swsm_semaphore(sc);
   13250 }
   13251 
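          /*
           * Get/put the per-function PHY semaphore on 82575 and newer.
           */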
   13252 static int
   13253 wm_get_phy_82575(struct wm_softc *sc)
   13254 {
   13255 
   13256 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13257 		device_xname(sc->sc_dev), __func__));
   13258 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13259 }
   13260 
   13261 static void
   13262 wm_put_phy_82575(struct wm_softc *sc)
   13263 {
   13264 
   13265 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13266 		device_xname(sc->sc_dev), __func__));
   13267 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13268 }
   13269 
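          /*
           * Get the SW/FW/HW semaphore by claiming MDIO ownership in
           * EXTCNFCTR; the PHY mutex serializes both PHY and NVM access.
           */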
   13270 static int
   13271 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13272 {
   13273 	uint32_t ext_ctrl;
    13274 	int timeout;
   13275 
   13276 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13277 		device_xname(sc->sc_dev), __func__));
   13278 
   13279 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13280 	for (timeout = 0; timeout < 200; timeout++) {
   13281 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13282 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13283 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13284 
   13285 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13286 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13287 			return 0;
   13288 		delay(5000);
   13289 	}
   13290 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13291 	    device_xname(sc->sc_dev), ext_ctrl);
   13292 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13293 	return 1;
   13294 }
   13295 
   13296 static void
   13297 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13298 {
   13299 	uint32_t ext_ctrl;
   13300 
   13301 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13302 		device_xname(sc->sc_dev), __func__));
   13303 
   13304 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13305 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13306 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13307 
   13308 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13309 }
   13310 
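          /*
           * Get the software flag: wait for firmware to drop MDIO
           * ownership in EXTCNFCTR, then claim it.  Used to arbitrate
           * PHY access on the ICH/PCH family.
           */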
   13311 static int
   13312 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13313 {
   13314 	uint32_t ext_ctrl;
   13315 	int timeout;
   13316 
   13317 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13318 		device_xname(sc->sc_dev), __func__));
   13319 	mutex_enter(sc->sc_ich_phymtx);
   13320 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13321 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13322 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13323 			break;
   13324 		delay(1000);
   13325 	}
   13326 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13327 		printf("%s: SW has already locked the resource\n",
   13328 		    device_xname(sc->sc_dev));
   13329 		goto out;
   13330 	}
   13331 
   13332 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13333 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13334 	for (timeout = 0; timeout < 1000; timeout++) {
   13335 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13336 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13337 			break;
   13338 		delay(1000);
   13339 	}
   13340 	if (timeout >= 1000) {
   13341 		printf("%s: failed to acquire semaphore\n",
   13342 		    device_xname(sc->sc_dev));
   13343 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13344 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13345 		goto out;
   13346 	}
   13347 	return 0;
   13348 
   13349 out:
   13350 	mutex_exit(sc->sc_ich_phymtx);
   13351 	return 1;
   13352 }
   13353 
   13354 static void
   13355 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13356 {
   13357 	uint32_t ext_ctrl;
   13358 
   13359 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13360 		device_xname(sc->sc_dev), __func__));
   13361 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13362 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13363 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13364 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13365 	} else {
   13366 		printf("%s: Semaphore unexpectedly released\n",
   13367 		    device_xname(sc->sc_dev));
   13368 	}
   13369 
   13370 	mutex_exit(sc->sc_ich_phymtx);
   13371 }
   13372 
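          /*
           * NVM access on ICH/PCH chips is serialized by the
           * sc_ich_nvmmtx mutex alone.
           */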
   13373 static int
   13374 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13375 {
   13376 
   13377 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13378 		device_xname(sc->sc_dev), __func__));
   13379 	mutex_enter(sc->sc_ich_nvmmtx);
   13380 
   13381 	return 0;
   13382 }
   13383 
   13384 static void
   13385 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13386 {
   13387 
   13388 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13389 		device_xname(sc->sc_dev), __func__));
   13390 	mutex_exit(sc->sc_ich_nvmmtx);
   13391 }
   13392 
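          /*
           * Get the HW semaphore (MDIO ownership in EXTCNFCTR) on the
           * 82573 family.
           */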
   13393 static int
   13394 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13395 {
   13396 	int i = 0;
   13397 	uint32_t reg;
   13398 
   13399 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13400 		device_xname(sc->sc_dev), __func__));
   13401 
   13402 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13403 	do {
   13404 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13405 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13406 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13407 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13408 			break;
   13409 		delay(2*1000);
   13410 		i++;
   13411 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13412 
   13413 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13414 		wm_put_hw_semaphore_82573(sc);
   13415 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13416 		    device_xname(sc->sc_dev));
   13417 		return -1;
   13418 	}
   13419 
   13420 	return 0;
   13421 }
   13422 
   13423 static void
   13424 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13425 {
   13426 	uint32_t reg;
   13427 
   13428 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13429 		device_xname(sc->sc_dev), __func__));
   13430 
   13431 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13432 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13433 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13434 }
   13435 
   13436 /*
   13437  * Management mode and power management related subroutines.
   13438  * BMC, AMT, suspend/resume and EEE.
   13439  */
   13440 
   13441 #ifdef WM_WOL
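          /*
           * Check whether the firmware is running in a manageability
           * (BMC/AMT) mode; dispatched per chip family.
           */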
   13442 static int
   13443 wm_check_mng_mode(struct wm_softc *sc)
   13444 {
   13445 	int rv;
   13446 
   13447 	switch (sc->sc_type) {
   13448 	case WM_T_ICH8:
   13449 	case WM_T_ICH9:
   13450 	case WM_T_ICH10:
   13451 	case WM_T_PCH:
   13452 	case WM_T_PCH2:
   13453 	case WM_T_PCH_LPT:
   13454 	case WM_T_PCH_SPT:
   13455 	case WM_T_PCH_CNP:
   13456 		rv = wm_check_mng_mode_ich8lan(sc);
   13457 		break;
   13458 	case WM_T_82574:
   13459 	case WM_T_82583:
   13460 		rv = wm_check_mng_mode_82574(sc);
   13461 		break;
   13462 	case WM_T_82571:
   13463 	case WM_T_82572:
   13464 	case WM_T_82573:
   13465 	case WM_T_80003:
   13466 		rv = wm_check_mng_mode_generic(sc);
   13467 		break;
   13468 	default:
    13469 		/* nothing to do */
   13470 		rv = 0;
   13471 		break;
   13472 	}
   13473 
   13474 	return rv;
   13475 }
   13476 
   13477 static int
   13478 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13479 {
   13480 	uint32_t fwsm;
   13481 
   13482 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13483 
   13484 	if (((fwsm & FWSM_FW_VALID) != 0)
   13485 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13486 		return 1;
   13487 
   13488 	return 0;
   13489 }
   13490 
   13491 static int
   13492 wm_check_mng_mode_82574(struct wm_softc *sc)
   13493 {
   13494 	uint16_t data;
   13495 
   13496 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13497 
   13498 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13499 		return 1;
   13500 
   13501 	return 0;
   13502 }
   13503 
   13504 static int
   13505 wm_check_mng_mode_generic(struct wm_softc *sc)
   13506 {
   13507 	uint32_t fwsm;
   13508 
   13509 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13510 
   13511 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13512 		return 1;
   13513 
   13514 	return 0;
   13515 }
   13516 #endif /* WM_WOL */
   13517 
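          /*
           * Return 1 if management pass-through is enabled: the BMC
           * receives its traffic through the MAC, so the driver must not
           * break that path; return 0 otherwise.
           */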
   13518 static int
   13519 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13520 {
   13521 	uint32_t manc, fwsm, factps;
   13522 
   13523 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13524 		return 0;
   13525 
   13526 	manc = CSR_READ(sc, WMREG_MANC);
   13527 
   13528 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13529 		device_xname(sc->sc_dev), manc));
   13530 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13531 		return 0;
   13532 
   13533 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13534 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13535 		factps = CSR_READ(sc, WMREG_FACTPS);
   13536 		if (((factps & FACTPS_MNGCG) == 0)
   13537 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13538 			return 1;
    13539 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13540 		uint16_t data;
   13541 
   13542 		factps = CSR_READ(sc, WMREG_FACTPS);
   13543 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13544 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13545 			device_xname(sc->sc_dev), factps, data));
   13546 		if (((factps & FACTPS_MNGCG) == 0)
   13547 		    && ((data & NVM_CFG2_MNGM_MASK)
   13548 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13549 			return 1;
   13550 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13551 	    && ((manc & MANC_ASF_EN) == 0))
   13552 		return 1;
   13553 
   13554 	return 0;
   13555 }
   13556 
   13557 static bool
   13558 wm_phy_resetisblocked(struct wm_softc *sc)
   13559 {
   13560 	bool blocked = false;
   13561 	uint32_t reg;
   13562 	int i = 0;
   13563 
   13564 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13565 		device_xname(sc->sc_dev), __func__));
   13566 
   13567 	switch (sc->sc_type) {
   13568 	case WM_T_ICH8:
   13569 	case WM_T_ICH9:
   13570 	case WM_T_ICH10:
   13571 	case WM_T_PCH:
   13572 	case WM_T_PCH2:
   13573 	case WM_T_PCH_LPT:
   13574 	case WM_T_PCH_SPT:
   13575 	case WM_T_PCH_CNP:
   13576 		do {
   13577 			reg = CSR_READ(sc, WMREG_FWSM);
   13578 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13579 				blocked = true;
   13580 				delay(10*1000);
   13581 				continue;
   13582 			}
   13583 			blocked = false;
    13584 		} while (blocked && (i++ < 30));
    13585 		return blocked;
   13587 	case WM_T_82571:
   13588 	case WM_T_82572:
   13589 	case WM_T_82573:
   13590 	case WM_T_82574:
   13591 	case WM_T_82583:
   13592 	case WM_T_80003:
    13593 		reg = CSR_READ(sc, WMREG_MANC);
    13594 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   13599 	default:
    13600 		/* PHY reset is never blocked on other chips */
   13601 		break;
   13602 	}
   13603 
   13604 	return false;
   13605 }
   13606 
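          /*
           * Set the DRV_LOAD bit to tell the firmware that the driver has
           * taken over the hardware; wm_release_hw_control() hands it back.
           */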
   13607 static void
   13608 wm_get_hw_control(struct wm_softc *sc)
   13609 {
   13610 	uint32_t reg;
   13611 
   13612 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13613 		device_xname(sc->sc_dev), __func__));
   13614 
   13615 	if (sc->sc_type == WM_T_82573) {
   13616 		reg = CSR_READ(sc, WMREG_SWSM);
   13617 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13618 	} else if (sc->sc_type >= WM_T_82571) {
   13619 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13620 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13621 	}
   13622 }
   13623 
   13624 static void
   13625 wm_release_hw_control(struct wm_softc *sc)
   13626 {
   13627 	uint32_t reg;
   13628 
   13629 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13630 		device_xname(sc->sc_dev), __func__));
   13631 
   13632 	if (sc->sc_type == WM_T_82573) {
   13633 		reg = CSR_READ(sc, WMREG_SWSM);
   13634 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13635 	} else if (sc->sc_type >= WM_T_82571) {
   13636 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13637 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13638 	}
   13639 }
   13640 
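          /*
           * Gate or ungate automatic PHY configuration by hardware; a
           * no-op before PCH2 (82579).
           */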
   13641 static void
   13642 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13643 {
   13644 	uint32_t reg;
   13645 
   13646 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13647 		device_xname(sc->sc_dev), __func__));
   13648 
   13649 	if (sc->sc_type < WM_T_PCH2)
   13650 		return;
   13651 
   13652 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13653 
   13654 	if (gate)
   13655 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13656 	else
   13657 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13658 
   13659 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13660 }
   13661 
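          /*
           * Switch the PHY link from SMBus (used while in low power
           * states) back to PCIe MDIO so the PHY registers are
           * accessible again.
           */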
   13662 static void
   13663 wm_smbustopci(struct wm_softc *sc)
   13664 {
   13665 	uint32_t fwsm, reg;
   13666 	int rv = 0;
   13667 
   13668 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13669 		device_xname(sc->sc_dev), __func__));
   13670 
   13671 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13672 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13673 
   13674 	/* Disable ULP */
   13675 	wm_ulp_disable(sc);
   13676 
   13677 	/* Acquire PHY semaphore */
   13678 	sc->phy.acquire(sc);
   13679 
   13680 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13681 	switch (sc->sc_type) {
   13682 	case WM_T_PCH_LPT:
   13683 	case WM_T_PCH_SPT:
   13684 	case WM_T_PCH_CNP:
   13685 		if (wm_phy_is_accessible_pchlan(sc))
   13686 			break;
   13687 
   13688 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13689 		reg |= CTRL_EXT_FORCE_SMBUS;
   13690 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13691 #if 0
   13692 		/* XXX Isn't this required??? */
   13693 		CSR_WRITE_FLUSH(sc);
   13694 #endif
   13695 		delay(50 * 1000);
   13696 		/* FALLTHROUGH */
   13697 	case WM_T_PCH2:
   13698 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13699 			break;
   13700 		/* FALLTHROUGH */
   13701 	case WM_T_PCH:
   13702 		if (sc->sc_type == WM_T_PCH)
   13703 			if ((fwsm & FWSM_FW_VALID) != 0)
   13704 				break;
   13705 
   13706 		if (wm_phy_resetisblocked(sc) == true) {
   13707 			printf("XXX reset is blocked(3)\n");
   13708 			break;
   13709 		}
   13710 
   13711 		wm_toggle_lanphypc_pch_lpt(sc);
   13712 
   13713 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13714 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13715 				break;
   13716 
   13717 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13718 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13719 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13720 
   13721 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13722 				break;
   13723 			rv = -1;
   13724 		}
   13725 		break;
   13726 	default:
   13727 		break;
   13728 	}
   13729 
   13730 	/* Release semaphore */
   13731 	sc->phy.release(sc);
   13732 
   13733 	if (rv == 0) {
   13734 		if (wm_phy_resetisblocked(sc)) {
   13735 			printf("XXX reset is blocked(4)\n");
   13736 			goto out;
   13737 		}
   13738 		wm_reset_phy(sc);
   13739 		if (wm_phy_resetisblocked(sc))
   13740 			printf("XXX reset is blocked(4)\n");
   13741 	}
   13742 
   13743 out:
   13744 	/*
   13745 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13746 	 */
   13747 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13748 		delay(10*1000);
   13749 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13750 	}
   13751 }
   13752 
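          /*
           * Keep the BMC path usable while the driver runs: disable
           * hardware ARP interception and route management packets
           * (ports 623/624) to the host.
           */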
   13753 static void
   13754 wm_init_manageability(struct wm_softc *sc)
   13755 {
   13756 
   13757 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13758 		device_xname(sc->sc_dev), __func__));
   13759 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13760 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13761 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13762 
   13763 		/* Disable hardware interception of ARP */
   13764 		manc &= ~MANC_ARP_EN;
   13765 
   13766 		/* Enable receiving management packets to the host */
   13767 		if (sc->sc_type >= WM_T_82571) {
   13768 			manc |= MANC_EN_MNG2HOST;
   13769 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13770 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13771 		}
   13772 
   13773 		CSR_WRITE(sc, WMREG_MANC, manc);
   13774 	}
   13775 }
   13776 
   13777 static void
   13778 wm_release_manageability(struct wm_softc *sc)
   13779 {
   13780 
   13781 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13782 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13783 
   13784 		manc |= MANC_ARP_EN;
   13785 		if (sc->sc_type >= WM_T_82571)
   13786 			manc &= ~MANC_EN_MNG2HOST;
   13787 
   13788 		CSR_WRITE(sc, WMREG_MANC, manc);
   13789 	}
   13790 }
   13791 
   13792 static void
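          /*
           * Record this chip's wakeup/manageability capabilities in
           * sc_flags (HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES,
           * HAS_MANAGE).
           */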
   13793 wm_get_wakeup(struct wm_softc *sc)
   13794 {
   13795 
   13796 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13797 	switch (sc->sc_type) {
   13798 	case WM_T_82573:
   13799 	case WM_T_82583:
   13800 		sc->sc_flags |= WM_F_HAS_AMT;
   13801 		/* FALLTHROUGH */
   13802 	case WM_T_80003:
   13803 	case WM_T_82575:
   13804 	case WM_T_82576:
   13805 	case WM_T_82580:
   13806 	case WM_T_I350:
   13807 	case WM_T_I354:
   13808 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13809 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13810 		/* FALLTHROUGH */
   13811 	case WM_T_82541:
   13812 	case WM_T_82541_2:
   13813 	case WM_T_82547:
   13814 	case WM_T_82547_2:
   13815 	case WM_T_82571:
   13816 	case WM_T_82572:
   13817 	case WM_T_82574:
   13818 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13819 		break;
   13820 	case WM_T_ICH8:
   13821 	case WM_T_ICH9:
   13822 	case WM_T_ICH10:
   13823 	case WM_T_PCH:
   13824 	case WM_T_PCH2:
   13825 	case WM_T_PCH_LPT:
   13826 	case WM_T_PCH_SPT:
   13827 	case WM_T_PCH_CNP:
   13828 		sc->sc_flags |= WM_F_HAS_AMT;
   13829 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13830 		break;
   13831 	default:
   13832 		break;
   13833 	}
   13834 
   13835 	/* 1: HAS_MANAGE */
   13836 	if (wm_enable_mng_pass_thru(sc) != 0)
   13837 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13838 
   13839 	/*
    13840 	 * Note that the WOL flags are set after the EEPROM has been
    13841 	 * reset.
   13842 	 */
   13843 }
   13844 
   13845 /*
   13846  * Unconfigure Ultra Low Power mode.
   13847  * Only for I217 and newer (see below).
   13848  */
   13849 static void
   13850 wm_ulp_disable(struct wm_softc *sc)
   13851 {
   13852 	uint32_t reg;
   13853 	int i = 0;
   13854 
   13855 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13856 		device_xname(sc->sc_dev), __func__));
   13857 	/* Exclude old devices */
   13858 	if ((sc->sc_type < WM_T_PCH_LPT)
   13859 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13860 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13861 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13862 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13863 		return;
   13864 
   13865 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13866 		/* Request ME un-configure ULP mode in the PHY */
   13867 		reg = CSR_READ(sc, WMREG_H2ME);
   13868 		reg &= ~H2ME_ULP;
   13869 		reg |= H2ME_ENFORCE_SETTINGS;
   13870 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13871 
   13872 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13873 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13874 			if (i++ == 30) {
    13875 				printf("%s: %s timed out\n",
          				    device_xname(sc->sc_dev), __func__);
   13876 				return;
   13877 			}
   13878 			delay(10 * 1000);
   13879 		}
   13880 		reg = CSR_READ(sc, WMREG_H2ME);
   13881 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13882 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13883 
   13884 		return;
   13885 	}
   13886 
   13887 	/* Acquire semaphore */
   13888 	sc->phy.acquire(sc);
   13889 
   13890 	/* Toggle LANPHYPC */
   13891 	wm_toggle_lanphypc_pch_lpt(sc);
   13892 
   13893 	/* Unforce SMBus mode in PHY */
   13894 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13895 	if (reg == 0x0000 || reg == 0xffff) {
   13896 		uint32_t reg2;
   13897 
   13898 		printf("%s: Force SMBus first.\n", __func__);
   13899 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13900 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13901 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13902 		delay(50 * 1000);
   13903 
   13904 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13905 	}
   13906 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13907 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13908 
   13909 	/* Unforce SMBus mode in MAC */
   13910 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13911 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13912 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13913 
   13914 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13915 	reg |= HV_PM_CTRL_K1_ENA;
   13916 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13917 
   13918 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13919 	reg &= ~(I218_ULP_CONFIG1_IND
   13920 	    | I218_ULP_CONFIG1_STICKY_ULP
   13921 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13922 	    | I218_ULP_CONFIG1_WOL_HOST
   13923 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13924 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13925 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13926 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13927 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13928 	reg |= I218_ULP_CONFIG1_START;
   13929 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13930 
   13931 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13932 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13933 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13934 
   13935 	/* Release semaphore */
   13936 	sc->phy.release(sc);
   13937 	wm_gmii_reset(sc);
   13938 	delay(50 * 1000);
   13939 }
   13940 
   13941 /* WOL in the newer chipset interfaces (pchlan) */
   13942 static void
   13943 wm_enable_phy_wakeup(struct wm_softc *sc)
   13944 {
   13945 #if 0
   13946 	uint16_t preg;
   13947 
   13948 	/* Copy MAC RARs to PHY RARs */
   13949 
   13950 	/* Copy MAC MTA to PHY MTA */
   13951 
   13952 	/* Configure PHY Rx Control register */
   13953 
   13954 	/* Enable PHY wakeup in MAC register */
   13955 
   13956 	/* Configure and enable PHY wakeup in PHY registers */
   13957 
   13958 	/* Activate PHY wakeup */
   13959 
   13960 	/* XXX */
   13961 #endif
   13962 }
   13963 
   13964 /* Power down workaround on D3 */
   13965 static void
   13966 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13967 {
   13968 	uint32_t reg;
   13969 	int i;
   13970 
   13971 	for (i = 0; i < 2; i++) {
   13972 		/* Disable link */
   13973 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13974 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13975 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13976 
   13977 		/*
   13978 		 * Call gig speed drop workaround on Gig disable before
   13979 		 * accessing any PHY registers
   13980 		 */
   13981 		if (sc->sc_type == WM_T_ICH8)
   13982 			wm_gig_downshift_workaround_ich8lan(sc);
   13983 
   13984 		/* Write VR power-down enable */
   13985 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13986 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13987 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13988 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13989 
   13990 		/* Read it back and test */
   13991 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13992 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13993 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13994 			break;
   13995 
   13996 		/* Issue PHY reset and repeat at most one more time */
   13997 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13998 	}
   13999 }
   14000 
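          /*
           * Arm the chip for wake-on-LAN: advertise APM wakeup, apply the
           * ICH/PCH workarounds, program the wakeup filters and request
           * PME.
           */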
   14001 static void
   14002 wm_enable_wakeup(struct wm_softc *sc)
   14003 {
   14004 	uint32_t reg, pmreg;
   14005 	pcireg_t pmode;
   14006 
   14007 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14008 		device_xname(sc->sc_dev), __func__));
   14009 
   14010 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14011 		&pmreg, NULL) == 0)
   14012 		return;
   14013 
   14014 	/* Advertise the wakeup capability */
   14015 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14016 	    | CTRL_SWDPIN(3));
   14017 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   14018 
   14019 	/* ICH workaround */
   14020 	switch (sc->sc_type) {
   14021 	case WM_T_ICH8:
   14022 	case WM_T_ICH9:
   14023 	case WM_T_ICH10:
   14024 	case WM_T_PCH:
   14025 	case WM_T_PCH2:
   14026 	case WM_T_PCH_LPT:
   14027 	case WM_T_PCH_SPT:
   14028 	case WM_T_PCH_CNP:
   14029 		/* Disable gig during WOL */
   14030 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14031 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   14032 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14033 		if (sc->sc_type == WM_T_PCH)
   14034 			wm_gmii_reset(sc);
   14035 
   14036 		/* Power down workaround */
   14037 		if (sc->sc_phytype == WMPHY_82577) {
   14038 			struct mii_softc *child;
   14039 
   14040 			/* Assume that the PHY is copper */
   14041 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14042 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   14043 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   14044 				    (768 << 5) | 25, 0x0444); /* magic num */
   14045 		}
   14046 		break;
   14047 	default:
   14048 		break;
   14049 	}
   14050 
   14051 	/* Keep the laser running on fiber adapters */
   14052 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14053 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14054 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14055 		reg |= CTRL_EXT_SWDPIN(3);
   14056 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14057 	}
   14058 
   14059 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14060 #if 0	/* for the multicast packet */
   14061 	reg |= WUFC_MC;
   14062 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14063 #endif
   14064 
   14065 	if (sc->sc_type >= WM_T_PCH)
   14066 		wm_enable_phy_wakeup(sc);
   14067 	else {
   14068 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14069 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14070 	}
   14071 
   14072 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14073 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14074 		|| (sc->sc_type == WM_T_PCH2))
   14075 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14076 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14077 
   14078 	/* Request PME */
   14079 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14080 #if 0
   14081 	/* Disable WOL */
   14082 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14083 #else
   14084 	/* For WOL */
   14085 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14086 #endif
   14087 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14088 }
   14089 
   14090 /* Disable ASPM L0s and/or L1 for workaround */
   14091 static void
   14092 wm_disable_aspm(struct wm_softc *sc)
   14093 {
   14094 	pcireg_t reg, mask = 0;
    14095 	const char *str = "";
   14096 
   14097 	/*
    14098 	 * Only for PCIe devices which have the PCIe capability structure
    14099 	 * in their PCI config space.
   14100 	 */
   14101 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14102 		return;
   14103 
   14104 	switch (sc->sc_type) {
   14105 	case WM_T_82571:
   14106 	case WM_T_82572:
   14107 		/*
   14108 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14109 		 * State Power management L1 State (ASPM L1).
   14110 		 */
   14111 		mask = PCIE_LCSR_ASPM_L1;
   14112 		str = "L1 is";
   14113 		break;
   14114 	case WM_T_82573:
   14115 	case WM_T_82574:
   14116 	case WM_T_82583:
   14117 		/*
   14118 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14119 		 *
    14120 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14121 		 * some chipsets.  Their documents say that disabling L0s
    14122 		 * with those specific chipsets is sufficient, but we
    14123 		 * disable both L0s and L1, as the Intel em driver does.
   14124 		 *
   14125 		 * References:
   14126 		 * Errata 8 of the Specification Update of i82573.
   14127 		 * Errata 20 of the Specification Update of i82574.
   14128 		 * Errata 9 of the Specification Update of i82583.
   14129 		 */
   14130 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14131 		str = "L0s and L1 are";
   14132 		break;
   14133 	default:
   14134 		return;
   14135 	}
   14136 
   14137 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14138 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14139 	reg &= ~mask;
   14140 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14141 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14142 
   14143 	/* Print only in wm_attach() */
   14144 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14145 		aprint_verbose_dev(sc->sc_dev,
   14146 		    "ASPM %s disabled to workaround the errata.\n", str);
   14147 }
   14148 
   14149 /* LPLU */
   14150 
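          /*
           * Disable D0 (active state) Low Power Link Up; while the
           * interface is running we prefer full link speed over the
           * power saving downshift.
           */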
   14151 static void
   14152 wm_lplu_d0_disable(struct wm_softc *sc)
   14153 {
   14154 	struct mii_data *mii = &sc->sc_mii;
   14155 	uint32_t reg;
   14156 
   14157 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14158 		device_xname(sc->sc_dev), __func__));
   14159 
   14160 	if (sc->sc_phytype == WMPHY_IFE)
   14161 		return;
   14162 
   14163 	switch (sc->sc_type) {
   14164 	case WM_T_82571:
   14165 	case WM_T_82572:
   14166 	case WM_T_82573:
   14167 	case WM_T_82575:
   14168 	case WM_T_82576:
   14169 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14170 		reg &= ~PMR_D0_LPLU;
   14171 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14172 		break;
   14173 	case WM_T_82580:
   14174 	case WM_T_I350:
   14175 	case WM_T_I210:
   14176 	case WM_T_I211:
   14177 		reg = CSR_READ(sc, WMREG_PHPM);
   14178 		reg &= ~PHPM_D0A_LPLU;
   14179 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14180 		break;
   14181 	case WM_T_82574:
   14182 	case WM_T_82583:
   14183 	case WM_T_ICH8:
   14184 	case WM_T_ICH9:
   14185 	case WM_T_ICH10:
   14186 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14187 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14188 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14189 		CSR_WRITE_FLUSH(sc);
   14190 		break;
   14191 	case WM_T_PCH:
   14192 	case WM_T_PCH2:
   14193 	case WM_T_PCH_LPT:
   14194 	case WM_T_PCH_SPT:
   14195 	case WM_T_PCH_CNP:
   14196 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14197 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14198 		if (wm_phy_resetisblocked(sc) == false)
   14199 			reg |= HV_OEM_BITS_ANEGNOW;
   14200 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14201 		break;
   14202 	default:
   14203 		break;
   14204 	}
   14205 }
   14206 
   14207 /* EEE */
   14208 
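          /*
           * Enable or disable IEEE 802.3az Energy Efficient Ethernet
           * according to the WM_F_EEE flag (I350 and relatives).
           */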
   14209 static void
   14210 wm_set_eee_i350(struct wm_softc *sc)
   14211 {
   14212 	uint32_t ipcnfg, eeer;
   14213 
   14214 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14215 	eeer = CSR_READ(sc, WMREG_EEER);
   14216 
   14217 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14218 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14219 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14220 		    | EEER_LPI_FC);
   14221 	} else {
   14222 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14223 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14224 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14225 		    | EEER_LPI_FC);
   14226 	}
   14227 
   14228 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14229 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14230 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14231 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14232 }
   14233 
   14234 /*
   14235  * Workarounds (mainly PHY related).
   14236  * Basically, PHY's workarounds are in the PHY drivers.
   14237  */
   14238 
   14239 /* Work-around for 82566 Kumeran PCS lock loss */
   14240 static void
   14241 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14242 {
   14243 	struct mii_data *mii = &sc->sc_mii;
   14244 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14245 	int i;
   14246 	int reg;
   14247 
   14248 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14249 		device_xname(sc->sc_dev), __func__));
   14250 
   14251 	/* If the link is not up, do nothing */
   14252 	if ((status & STATUS_LU) == 0)
   14253 		return;
   14254 
    14255 	/* Nothing to do if the link speed is other than 1Gbps */
   14256 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14257 		return;
   14258 
   14260 	for (i = 0; i < 10; i++) {
   14261 		/* read twice */
   14262 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14263 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14264 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14265 			goto out;	/* GOOD! */
   14266 
   14267 		/* Reset the PHY */
   14268 		wm_reset_phy(sc);
   14269 		delay(5*1000);
   14270 	}
   14271 
   14272 	/* Disable GigE link negotiation */
   14273 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14274 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14275 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14276 
   14277 	/*
   14278 	 * Call gig speed drop workaround on Gig disable before accessing
   14279 	 * any PHY registers.
   14280 	 */
   14281 	wm_gig_downshift_workaround_ich8lan(sc);
   14282 
   14283 out:
   14284 	return;
   14285 }
   14286 
   14287 /* WOL from S5 stops working */
   14288 static void
   14289 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14290 {
   14291 	uint16_t kmreg;
   14292 
   14293 	/* Only for igp3 */
   14294 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14295 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14296 			return;
   14297 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14298 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14299 			return;
   14300 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14301 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14302 	}
   14303 }
   14304 
   14305 /*
   14306  * Workaround for pch's PHYs
   14307  * XXX should be moved to new PHY driver?
   14308  */
   14309 static void
   14310 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14311 {
   14312 
   14313 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14314 		device_xname(sc->sc_dev), __func__));
   14315 	KASSERT(sc->sc_type == WM_T_PCH);
   14316 
   14317 	if (sc->sc_phytype == WMPHY_82577)
   14318 		wm_set_mdio_slow_mode_hv(sc);
   14319 
   14320 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14321 
   14322 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14323 
   14324 	/* 82578 */
   14325 	if (sc->sc_phytype == WMPHY_82578) {
   14326 		struct mii_softc *child;
   14327 
   14328 		/*
   14329 		 * Return registers to default by doing a soft reset then
   14330 		 * writing 0x3140 to the control register
   14331 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14332 		 */
   14333 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14334 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14335 			PHY_RESET(child);
   14336 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14337 			    0x3140);
   14338 		}
   14339 	}
   14340 
   14341 	/* Select page 0 */
   14342 	sc->phy.acquire(sc);
   14343 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14344 	sc->phy.release(sc);
   14345 
   14346 	/*
    14347 	 * Configure the K1 Si workaround during PHY reset, assuming link
    14348 	 * is up, so that K1 is disabled if the link runs at 1Gbps.
   14349 	 */
   14350 	wm_k1_gig_workaround_hv(sc, 1);
   14351 }
   14352 
   14353 static void
   14354 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14355 {
   14356 
   14357 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14358 		device_xname(sc->sc_dev), __func__));
   14359 	KASSERT(sc->sc_type == WM_T_PCH2);
   14360 
   14361 	wm_set_mdio_slow_mode_hv(sc);
   14362 }
   14363 
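          /*
           * K1 is a Kumeran power saving state which cannot be used at
           * 1Gbps; disable it while link is up and restore the NVM
           * default otherwise.
           */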
   14364 static int
   14365 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14366 {
   14367 	int k1_enable = sc->sc_nvm_k1_enabled;
   14368 
   14369 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14370 		device_xname(sc->sc_dev), __func__));
   14371 
   14372 	if (sc->phy.acquire(sc) != 0)
   14373 		return -1;
   14374 
   14375 	if (link) {
   14376 		k1_enable = 0;
   14377 
   14378 		/* Link stall fix for link up */
   14379 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14380 		    0x0100);
   14381 	} else {
   14382 		/* Link stall fix for link down */
   14383 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14384 		    0x4100);
   14385 	}
   14386 
   14387 	wm_configure_k1_ich8lan(sc, k1_enable);
   14388 	sc->phy.release(sc);
   14389 
   14390 	return 0;
   14391 }
   14392 
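          /*
           * Put the HV (82577/82578) PHY's MDIO interface into slow mode.
           */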
   14393 static void
   14394 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14395 {
   14396 	uint32_t reg;
   14397 
   14398 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14399 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14400 	    reg | HV_KMRN_MDIO_SLOW);
   14401 }
   14402 
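          /*
           * Write the K1 enable bit over the Kumeran interface, then
           * briefly force the MAC speed (FRCSPD/SPD_BYPS) and restore
           * it, as the K1 configuration sequence requires.
           */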
   14403 static void
   14404 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14405 {
   14406 	uint32_t ctrl, ctrl_ext, tmp;
   14407 	uint16_t kmreg;
   14408 	int rv;
   14409 
   14410 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14411 	if (rv != 0)
   14412 		return;
   14413 
   14414 	if (k1_enable)
   14415 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14416 	else
   14417 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14418 
   14419 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14420 	if (rv != 0)
   14421 		return;
   14422 
   14423 	delay(20);
   14424 
   14425 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14426 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14427 
   14428 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14429 	tmp |= CTRL_FRCSPD;
   14430 
   14431 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14432 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14433 	CSR_WRITE_FLUSH(sc);
   14434 	delay(20);
   14435 
   14436 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14437 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14438 	CSR_WRITE_FLUSH(sc);
   14439 	delay(20);
   14440 
   14441 	return;
   14442 }
   14443 
    14444 /* Special case: the 82575 needs manual init ... */
   14445 static void
   14446 wm_reset_init_script_82575(struct wm_softc *sc)
   14447 {
   14448 	/*
    14449 	 * Remark: this is untested code - we have no board without EEPROM;
    14450 	 * same setup as mentioned in the FreeBSD driver for the i82575.
   14451 	 */
   14452 
   14453 	/* SerDes configuration via SERDESCTRL */
   14454 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14455 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14456 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14457 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14458 
   14459 	/* CCM configuration via CCMCTL register */
   14460 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14461 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14462 
   14463 	/* PCIe lanes configuration */
   14464 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14465 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14466 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14467 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14468 
   14469 	/* PCIe PLL Configuration */
   14470 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14471 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14472 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14473 }
   14474 
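          /*
           * Reprogram MDICNFG's external/shared MDIO bits from the port's
           * NVM CFG3 word; only needed on SGMII-mode 82580s.
           */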
   14475 static void
   14476 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14477 {
   14478 	uint32_t reg;
   14479 	uint16_t nvmword;
   14480 	int rv;
   14481 
   14482 	if (sc->sc_type != WM_T_82580)
   14483 		return;
   14484 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14485 		return;
   14486 
   14487 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14488 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14489 	if (rv != 0) {
   14490 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14491 		    __func__);
   14492 		return;
   14493 	}
   14494 
   14495 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14496 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14497 		reg |= MDICNFG_DEST;
   14498 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14499 		reg |= MDICNFG_COM_MDIO;
   14500 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14501 }
   14502 
   14503 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14504 
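          /*
           * Check that the PCH PHY answers on MDIO by reading its ID
           * registers, falling back to slow MDIO mode on pre-LPT chips;
           * once reachable, unforce SMBus mode unless ME owns the PHY.
           */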
   14505 static bool
   14506 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14507 {
   14508 	int i;
   14509 	uint32_t reg;
   14510 	uint16_t id1, id2;
   14511 
   14512 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14513 		device_xname(sc->sc_dev), __func__));
   14514 	id1 = id2 = 0xffff;
   14515 	for (i = 0; i < 2; i++) {
   14516 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14517 		if (MII_INVALIDID(id1))
   14518 			continue;
   14519 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14520 		if (MII_INVALIDID(id2))
   14521 			continue;
   14522 		break;
   14523 	}
    14524 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    14525 		goto out;
   14527 
   14528 	if (sc->sc_type < WM_T_PCH_LPT) {
   14529 		sc->phy.release(sc);
   14530 		wm_set_mdio_slow_mode_hv(sc);
   14531 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14532 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14533 		sc->phy.acquire(sc);
   14534 	}
   14535 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14536 		printf("XXX return with false\n");
   14537 		return false;
   14538 	}
   14539 out:
   14540 	if (sc->sc_type >= WM_T_PCH_LPT) {
   14541 		/* Only unforce SMBus if ME is not active */
   14542 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14543 			/* Unforce SMBus mode in PHY */
   14544 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14545 			    CV_SMB_CTRL);
   14546 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14547 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14548 			    CV_SMB_CTRL, reg);
   14549 
   14550 			/* Unforce SMBus mode in MAC */
   14551 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14552 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14553 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14554 		}
   14555 	}
   14556 	return true;
   14557 }
   14558 
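          /*
           * Toggle the LANPHYPC pin to force the PHY out of its low power
           * state and wait for it to come back up.
           */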
   14559 static void
   14560 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14561 {
   14562 	uint32_t reg;
   14563 	int i;
   14564 
   14565 	/* Set PHY Config Counter to 50msec */
   14566 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14567 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14568 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14569 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14570 
   14571 	/* Toggle LANPHYPC */
   14572 	reg = CSR_READ(sc, WMREG_CTRL);
   14573 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14574 	reg &= ~CTRL_LANPHYPC_VALUE;
   14575 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14576 	CSR_WRITE_FLUSH(sc);
   14577 	delay(1000);
   14578 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14579 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14580 	CSR_WRITE_FLUSH(sc);
   14581 
   14582 	if (sc->sc_type < WM_T_PCH_LPT)
   14583 		delay(50 * 1000);
   14584 	else {
   14585 		i = 20;
   14586 
   14587 		do {
   14588 			delay(5 * 1000);
   14589 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14590 		    && i--);
   14591 
   14592 		delay(30 * 1000);
   14593 	}
   14594 }
   14595 
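          /*
           * Program PCIe LTR (Latency Tolerance Reporting) and OBFF for
           * PCH_LPT and newer, based on the Rx buffer size and the link
           * speed.
           */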
   14596 static int
   14597 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14598 {
   14599 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14600 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14601 	uint32_t rxa;
   14602 	uint16_t scale = 0, lat_enc = 0;
   14603 	int32_t obff_hwm = 0;
   14604 	int64_t lat_ns, value;
   14605 
   14606 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14607 		device_xname(sc->sc_dev), __func__));
   14608 
   14609 	if (link) {
   14610 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14611 		uint32_t status;
   14612 		uint16_t speed;
   14613 		pcireg_t preg;
   14614 
   14615 		status = CSR_READ(sc, WMREG_STATUS);
   14616 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14617 		case STATUS_SPEED_10:
   14618 			speed = 10;
   14619 			break;
   14620 		case STATUS_SPEED_100:
   14621 			speed = 100;
   14622 			break;
   14623 		case STATUS_SPEED_1000:
   14624 			speed = 1000;
   14625 			break;
   14626 		default:
   14627 			device_printf(sc->sc_dev, "Unknown speed "
   14628 			    "(status = %08x)\n", status);
   14629 			return -1;
   14630 		}
   14631 
   14632 		/* Rx Packet Buffer Allocation size (KB) */
   14633 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14634 
   14635 		/*
   14636 		 * Determine the maximum latency tolerated by the device.
   14637 		 *
   14638 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14639 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14640 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14641 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14642 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14643 		 */
   14644 		lat_ns = ((int64_t)rxa * 1024 -
   14645 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14646 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14647 		if (lat_ns < 0)
   14648 			lat_ns = 0;
   14649 		else
   14650 			lat_ns /= speed;
   14651 		value = lat_ns;
   14652 
   14653 		while (value > LTRV_VALUE) {
    14654 			scale++;
   14655 			value = howmany(value, __BIT(5));
   14656 		}
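          		/*
          		 * Example (a sketch): lat_ns = 50000 encodes as scale 2
          		 * (units of 2^10 ns) and value 49, since
          		 * howmany(howmany(50000, 32), 32) == 49 and
          		 * 49 * 1024 = 50176 >= 50000.
          		 */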
   14657 		if (scale > LTRV_SCALE_MAX) {
   14658 			printf("%s: Invalid LTR latency scale %d\n",
   14659 			    device_xname(sc->sc_dev), scale);
   14660 			return -1;
   14661 		}
   14662 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14663 
   14664 		/* Determine the maximum latency tolerated by the platform */
   14665 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14666 		    WM_PCI_LTR_CAP_LPT);
   14667 		max_snoop = preg & 0xffff;
   14668 		max_nosnoop = preg >> 16;
   14669 
   14670 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14671 
   14672 		if (lat_enc > max_ltr_enc) {
   14673 			lat_enc = max_ltr_enc;
   14674 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14675 			    * PCI_LTR_SCALETONS(
   14676 				    __SHIFTOUT(lat_enc,
   14677 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14678 		}
   14679 
   14680 		if (lat_ns) {
   14681 			lat_ns *= speed * 1000;
   14682 			lat_ns /= 8;
   14683 			lat_ns /= 1000000000;
   14684 			obff_hwm = (int32_t)(rxa - lat_ns);
   14685 		}
   14686 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    14687 			device_printf(sc->sc_dev, "Invalid high water mark %d "
    14688 			    "(rxa = %d, lat_ns = %d)\n",
   14689 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14690 			return -1;
   14691 		}
   14692 	}
   14693 	/* Snoop and No-Snoop latencies the same */
   14694 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14695 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14696 
   14697 	/* Set OBFF high water mark */
   14698 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14699 	reg |= obff_hwm;
   14700 	CSR_WRITE(sc, WMREG_SVT, reg);
   14701 
   14702 	/* Enable OBFF */
   14703 	reg = CSR_READ(sc, WMREG_SVCR);
   14704 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14705 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14706 
   14707 	return 0;
   14708 }
   14709 
   14710 /*
   14711  * I210 Errata 25 and I211 Errata 10
   14712  * Slow System Clock.
   14713  */
   14714 static void
   14715 wm_pll_workaround_i210(struct wm_softc *sc)
   14716 {
   14717 	uint32_t mdicnfg, wuc;
   14718 	uint32_t reg;
   14719 	pcireg_t pcireg;
   14720 	uint32_t pmreg;
   14721 	uint16_t nvmword, tmp_nvmword;
   14722 	int phyval;
   14723 	bool wa_done = false;
   14724 	int i;
   14725 
   14726 	/* Save WUC and MDICNFG registers */
   14727 	wuc = CSR_READ(sc, WMREG_WUC);
   14728 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14729 
   14730 	reg = mdicnfg & ~MDICNFG_DEST;
   14731 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14732 
   14733 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14734 		nvmword = INVM_DEFAULT_AL;
   14735 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14736 
   14737 	/* Get Power Management cap offset */
   14738 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14739 		&pmreg, NULL) == 0)
   14740 		return;
   14741 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14742 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14743 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14744 
   14745 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14746 			break; /* OK */
   14747 		}
   14748 
   14749 		wa_done = true;
   14750 		/* Directly reset the internal PHY */
   14751 		reg = CSR_READ(sc, WMREG_CTRL);
   14752 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14753 
   14754 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14755 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14756 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14757 
   14758 		CSR_WRITE(sc, WMREG_WUC, 0);
   14759 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14760 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14761 
   14762 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14763 		    pmreg + PCI_PMCSR);
   14764 		pcireg |= PCI_PMCSR_STATE_D3;
   14765 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14766 		    pmreg + PCI_PMCSR, pcireg);
   14767 		delay(1000);
   14768 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14769 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14770 		    pmreg + PCI_PMCSR, pcireg);
   14771 
   14772 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14773 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14774 
   14775 		/* Restore WUC register */
   14776 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14777 	}
   14778 
   14779 	/* Restore MDICNFG setting */
   14780 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14781 	if (wa_done)
   14782 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14783 }
   14784 
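          /*
           * Quirk for legacy (INTx) interrupts on SPT/CNP: ungate the
           * side clock (FEXTNVM7) and disable IOSF sideband clock gating
           * and clock requests (FEXTNVM9).
           */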
   14785 static void
   14786 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14787 {
   14788 	uint32_t reg;
   14789 
   14790 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14791 		device_xname(sc->sc_dev), __func__));
   14792 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   14793 	    || (sc->sc_type == WM_T_PCH_CNP));
   14794 
   14795 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14796 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14797 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14798 
   14799 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14800 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14801 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14802 }
   14803