/*	$NetBSD: if_wm.c,v 1.625 2019/02/07 04:03:24 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.625 2019/02/07 04:03:24 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
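
/*
 * Illustrative DPRINTF usage (a sketch only; real call sites follow the
 * same shape): the second argument is the complete printf argument list
 * wrapped in its own parentheses, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
 *	    device_xname(sc->sc_dev)));
 *
 * Without WM_DEBUG the macro expands to nothing, so the call has no cost.
 */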

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
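
/*
 * The #ifndef defaults above are compile-time tunables; a kernel build
 * can override them without editing this file, e.g. (illustrative only,
 * exact option plumbing depends on the kernel config):
 *
 *	makeoptions	CPPFLAGS+="-DWM_DISABLE_MSI=1 -DWM_WATCHDOG_TIMEOUT=10"
 */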

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
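
/*
 * Since WM_NTXDESC(txq) and WM_TXQUEUELEN(txq) are powers of two, the
 * "(x + 1) & mask" form above is a cheap modulo for ring wrap-around.
 * Illustrative arithmetic only:
 *
 *	WM_NEXTTX(txq, 4095) == 0	with WM_NTXDESC(txq) == 4096
 *	WM_NEXTTX(txq, 41)   == 42	in the common, non-wrapping case
 */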

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets:
 * a ~9k jumbo frame spans five 2k clusters, and 256 / 5 leaves room
 * for roughly 50 of them.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
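
/*
 * WM_CDTXOFF()/WM_CDRXOFF() yield the byte offset of descriptor "x" from
 * the start of its ring. Illustrative arithmetic only, assuming a
 * 16-byte Tx descriptor:
 *
 *	WM_CDTXOFF(txq, 3) == 3 * 16 == 48
 */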

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
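
/*
 * A sketch of how the event counter macros above expand:
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares "txq_txdw_evcnt_name" and
 * "struct evcnt txq_ev_txdw", and WM_Q_EVCNT_ATTACH() with qnum == 0
 * formats the counter name as "txq00txdw" before calling
 * evcnt_attach_dynamic().
 */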

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags, to
	 * manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only
					 * MSI-X uses sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
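
/*
 * Illustrative use of the two macros above (a sketch of the Rx completion
 * path): each 2k buffer of a multi-buffer packet is appended with
 *
 *	WM_RXCHAIN_LINK(rxq, m);
 *
 * and once the packet has been handed up (or dropped), the chain is
 * emptied with WM_RXCHAIN_RESET(rxq).
 */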

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
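
/*
 * CSR_WRITE_FLUSH() forces posted writes out to the device by doing a
 * dummy read of the STATUS register. A typical (illustrative) sequence:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10);
 */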

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
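
/*
 * The _LO/_HI macros split a descriptor's bus address into the two 32-bit
 * halves the hardware registers expect. Illustrative arithmetic only: a
 * descriptor at bus address 0x0000000123450000 gives
 * WM_CDTXADDR_LO() == 0x23450000 and WM_CDTXADDR_HI() == 0x1; the HI half
 * is always 0 when bus_addr_t is 32 bits wide.
 */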

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers rather than MII
 * registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
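
/*
 * Autoconf glue. A kernel config attaches this driver with a line like
 * the following (illustrative; the usual GENERIC wording):
 *
 *	wm*	at pci? dev ? function ?	# Intel 8254x gigabit
 */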

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1322 	  "82567LM-3 LAN Controller",
   1323 	  WM_T_ICH10,		WMP_F_COPPER },
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1325 	  "82567LF-3 LAN Controller",
   1326 	  WM_T_ICH10,		WMP_F_COPPER },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1328 	  "82567V-2 LAN Controller",
   1329 	  WM_T_ICH10,		WMP_F_COPPER },
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1331 	  "82567V-3? LAN Controller",
   1332 	  WM_T_ICH10,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1334 	  "HANKSVILLE LAN Controller",
   1335 	  WM_T_ICH10,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1337 	  "PCH LAN (82577LM) Controller",
   1338 	  WM_T_PCH,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1340 	  "PCH LAN (82577LC) Controller",
   1341 	  WM_T_PCH,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1343 	  "PCH LAN (82578DM) Controller",
   1344 	  WM_T_PCH,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1346 	  "PCH LAN (82578DC) Controller",
   1347 	  WM_T_PCH,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1349 	  "PCH2 LAN (82579LM) Controller",
   1350 	  WM_T_PCH2,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1352 	  "PCH2 LAN (82579V) Controller",
   1353 	  WM_T_PCH2,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1355 	  "82575EB dual-1000baseT Ethernet",
   1356 	  WM_T_82575,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1358 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1359 	  WM_T_82575,		WMP_F_SERDES },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1361 	  "82575GB quad-1000baseT Ethernet",
   1362 	  WM_T_82575,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1364 	  "82575GB quad-1000baseT Ethernet (PM)",
   1365 	  WM_T_82575,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1367 	  "82576 1000BaseT Ethernet",
   1368 	  WM_T_82576,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1370 	  "82576 1000BaseX Ethernet",
   1371 	  WM_T_82576,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1374 	  "82576 gigabit Ethernet (SERDES)",
   1375 	  WM_T_82576,		WMP_F_SERDES },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1378 	  "82576 quad-1000BaseT Ethernet",
   1379 	  WM_T_82576,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1382 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1383 	  WM_T_82576,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1386 	  "82576 gigabit Ethernet",
   1387 	  WM_T_82576,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1390 	  "82576 gigabit Ethernet (SERDES)",
   1391 	  WM_T_82576,		WMP_F_SERDES },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1393 	  "82576 quad-gigabit Ethernet (SERDES)",
   1394 	  WM_T_82576,		WMP_F_SERDES },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1397 	  "82580 1000BaseT Ethernet",
   1398 	  WM_T_82580,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1400 	  "82580 1000BaseX Ethernet",
   1401 	  WM_T_82580,		WMP_F_FIBER },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1404 	  "82580 1000BaseT Ethernet (SERDES)",
   1405 	  WM_T_82580,		WMP_F_SERDES },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1408 	  "82580 gigabit Ethernet (SGMII)",
   1409 	  WM_T_82580,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1411 	  "82580 dual-1000BaseT Ethernet",
   1412 	  WM_T_82580,		WMP_F_COPPER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1415 	  "82580 quad-1000BaseX Ethernet",
   1416 	  WM_T_82580,		WMP_F_FIBER },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1419 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1420 	  WM_T_82580,		WMP_F_COPPER },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1423 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1424 	  WM_T_82580,		WMP_F_SERDES },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1427 	  "DH89XXCC 1000BASE-KX Ethernet",
   1428 	  WM_T_82580,		WMP_F_SERDES },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1431 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1432 	  WM_T_82580,		WMP_F_SERDES },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1435 	  "I350 Gigabit Network Connection",
   1436 	  WM_T_I350,		WMP_F_COPPER },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1439 	  "I350 Gigabit Fiber Network Connection",
   1440 	  WM_T_I350,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1443 	  "I350 Gigabit Backplane Connection",
   1444 	  WM_T_I350,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1447 	  "I350 Quad Port Gigabit Ethernet",
   1448 	  WM_T_I350,		WMP_F_SERDES },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1451 	  "I350 Gigabit Connection",
   1452 	  WM_T_I350,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1455 	  "I354 Gigabit Ethernet (KX)",
   1456 	  WM_T_I354,		WMP_F_SERDES },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1459 	  "I354 Gigabit Ethernet (SGMII)",
   1460 	  WM_T_I354,		WMP_F_COPPER },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1463 	  "I354 Gigabit Ethernet (2.5G)",
   1464 	  WM_T_I354,		WMP_F_COPPER },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1467 	  "I210-T1 Ethernet Server Adapter",
   1468 	  WM_T_I210,		WMP_F_COPPER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1471 	  "I210 Ethernet (Copper OEM)",
   1472 	  WM_T_I210,		WMP_F_COPPER },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1475 	  "I210 Ethernet (Copper IT)",
   1476 	  WM_T_I210,		WMP_F_COPPER },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1479 	  "I210 Ethernet (FLASH less)",
   1480 	  WM_T_I210,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1483 	  "I210 Gigabit Ethernet (Fiber)",
   1484 	  WM_T_I210,		WMP_F_FIBER },
   1485 
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1487 	  "I210 Gigabit Ethernet (SERDES)",
   1488 	  WM_T_I210,		WMP_F_SERDES },
   1489 
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1491 	  "I210 Gigabit Ethernet (FLASH less)",
   1492 	  WM_T_I210,		WMP_F_SERDES },
   1493 
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1495 	  "I210 Gigabit Ethernet (SGMII)",
   1496 	  WM_T_I210,		WMP_F_COPPER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1499 	  "I211 Ethernet (COPPER)",
   1500 	  WM_T_I211,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1502 	  "I217 V Ethernet Connection",
   1503 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1505 	  "I217 LM Ethernet Connection",
   1506 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1508 	  "I218 V Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1511 	  "I218 V Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1517 	  "I218 LM Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1520 	  "I218 LM Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1526 	  "I219 V Ethernet Connection",
   1527 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1529 	  "I219 V Ethernet Connection",
   1530 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1532 	  "I219 V Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1535 	  "I219 V Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1538 	  "I219 LM Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1553 	  "I219 V Ethernet Connection",
   1554 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1556 	  "I219 V Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1559 	  "I219 LM Ethernet Connection",
   1560 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1564 	{ 0,			0,
   1565 	  NULL,
   1566 	  0,			0 },
   1567 };
   1568 
   1569 /*
   1570  * Register read/write functions.
   1571  * Other than CSR_{READ|WRITE}().
   1572  */
   1573 
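         /*
          * Indirect I/O register access, as used by the helpers below:
          * write the target register's offset to the address window at
          * byte 0 of the I/O BAR, then read or write its value through
          * the data window at byte 4. (Intel's datasheets call these the
          * IOADDR and IODATA windows.)
          */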
   1574 #if 0 /* Not currently used */
   1575 static inline uint32_t
   1576 wm_io_read(struct wm_softc *sc, int reg)
   1577 {
   1578 
   1579 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1580 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1581 }
   1582 #endif
   1583 
   1584 static inline void
   1585 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1586 {
   1587 
   1588 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1589 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1590 }
   1591 
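         /*
          * Write an 8-bit value to a byte-addressed 82575 controller
          * register: put the data in the low bits and the byte offset in
          * the address field of the control register, then poll in 5us
          * steps until the chip reports ready or the poll times out.
          */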
   1592 static inline void
   1593 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1594     uint32_t data)
   1595 {
   1596 	uint32_t regval;
   1597 	int i;
   1598 
   1599 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1600 
   1601 	CSR_WRITE(sc, reg, regval);
   1602 
   1603 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1604 		delay(5);
   1605 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1606 			break;
   1607 	}
   1608 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1609 		aprint_error("%s: WARNING:"
   1610 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1611 		    device_xname(sc->sc_dev), reg);
   1612 	}
   1613 }
   1614 
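         /*
          * Store a DMA address into the two little-endian 32-bit halves
          * of a legacy descriptor address field; the high half is only
          * meaningful when bus addresses are 64 bits wide.
          */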
   1615 static inline void
   1616 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1617 {
   1618 	wa->wa_low = htole32(v & 0xffffffffU);
   1619 	if (sizeof(bus_addr_t) == 8)
   1620 		wa->wa_high = htole32((uint64_t) v >> 32);
   1621 	else
   1622 		wa->wa_high = 0;
   1623 }
   1624 
   1625 /*
   1626  * Descriptor sync/init functions.
   1627  */
   1628 static inline void
   1629 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1630 {
   1631 	struct wm_softc *sc = txq->txq_sc;
   1632 
   1633 	/* If it will wrap around, sync to the end of the ring. */
   1634 	if ((start + num) > WM_NTXDESC(txq)) {
   1635 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1636 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1637 		    (WM_NTXDESC(txq) - start), ops);
   1638 		num -= (WM_NTXDESC(txq) - start);
   1639 		start = 0;
   1640 	}
   1641 
   1642 	/* Now sync whatever is left. */
   1643 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1644 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1645 }
   1646 
   1647 static inline void
   1648 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1649 {
   1650 	struct wm_softc *sc = rxq->rxq_sc;
   1651 
   1652 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1653 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1654 }
   1655 
   1656 static inline void
   1657 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1658 {
   1659 	struct wm_softc *sc = rxq->rxq_sc;
   1660 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1661 	struct mbuf *m = rxs->rxs_mbuf;
   1662 
   1663 	/*
   1664 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1665 	 * so that the payload after the Ethernet header is aligned
   1666 	 * to a 4-byte boundary.
    1667 	 *
   1668 	 * XXX BRAINDAMAGE ALERT!
   1669 	 * The stupid chip uses the same size for every buffer, which
   1670 	 * is set in the Receive Control register.  We are using the 2K
   1671 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1672 	 * reason, we can't "scoot" packets longer than the standard
   1673 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1674 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1675 	 * the upper layer copy the headers.
   1676 	 */
   1677 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1678 
   1679 	if (sc->sc_type == WM_T_82574) {
   1680 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1681 		rxd->erx_data.erxd_addr =
   1682 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1683 		rxd->erx_data.erxd_dd = 0;
   1684 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1685 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1686 
   1687 		rxd->nqrx_data.nrxd_paddr =
   1688 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1689 		/* Currently, split header is not supported. */
   1690 		rxd->nqrx_data.nrxd_haddr = 0;
   1691 	} else {
   1692 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1693 
   1694 		wm_set_dma_addr(&rxd->wrx_addr,
   1695 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1696 		rxd->wrx_len = 0;
   1697 		rxd->wrx_cksum = 0;
   1698 		rxd->wrx_status = 0;
   1699 		rxd->wrx_errors = 0;
   1700 		rxd->wrx_special = 0;
   1701 	}
   1702 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1703 
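         	/* Pass the slot back to the hardware by advancing the Rx
         	 * descriptor tail (RDT). */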
   1704 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1705 }
   1706 
   1707 /*
   1708  * Device driver interface functions and commonly used functions.
   1709  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1710  */
   1711 
   1712 /* Lookup supported device table */
   1713 static const struct wm_product *
   1714 wm_lookup(const struct pci_attach_args *pa)
   1715 {
   1716 	const struct wm_product *wmp;
   1717 
   1718 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1719 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1720 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1721 			return wmp;
   1722 	}
   1723 	return NULL;
   1724 }
   1725 
   1726 /* The match function (ca_match) */
   1727 static int
   1728 wm_match(device_t parent, cfdata_t cf, void *aux)
   1729 {
   1730 	struct pci_attach_args *pa = aux;
   1731 
   1732 	if (wm_lookup(pa) != NULL)
   1733 		return 1;
   1734 
   1735 	return 0;
   1736 }
   1737 
   1738 /* The attach function (ca_attach) */
   1739 static void
   1740 wm_attach(device_t parent, device_t self, void *aux)
   1741 {
   1742 	struct wm_softc *sc = device_private(self);
   1743 	struct pci_attach_args *pa = aux;
   1744 	prop_dictionary_t dict;
   1745 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1746 	pci_chipset_tag_t pc = pa->pa_pc;
   1747 	int counts[PCI_INTR_TYPE_SIZE];
   1748 	pci_intr_type_t max_type;
   1749 	const char *eetype, *xname;
   1750 	bus_space_tag_t memt;
   1751 	bus_space_handle_t memh;
   1752 	bus_size_t memsize;
   1753 	int memh_valid;
   1754 	int i, error;
   1755 	const struct wm_product *wmp;
   1756 	prop_data_t ea;
   1757 	prop_number_t pn;
   1758 	uint8_t enaddr[ETHER_ADDR_LEN];
   1759 	char buf[256];
   1760 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1761 	pcireg_t preg, memtype;
   1762 	uint16_t eeprom_data, apme_mask;
   1763 	bool force_clear_smbi;
   1764 	uint32_t link_mode;
   1765 	uint32_t reg;
   1766 
   1767 	sc->sc_dev = self;
   1768 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1769 	sc->sc_core_stopping = false;
   1770 
   1771 	wmp = wm_lookup(pa);
   1772 #ifdef DIAGNOSTIC
   1773 	if (wmp == NULL) {
   1774 		printf("\n");
   1775 		panic("wm_attach: impossible");
   1776 	}
   1777 #endif
   1778 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1779 
   1780 	sc->sc_pc = pa->pa_pc;
   1781 	sc->sc_pcitag = pa->pa_tag;
   1782 
   1783 	if (pci_dma64_available(pa))
   1784 		sc->sc_dmat = pa->pa_dmat64;
   1785 	else
   1786 		sc->sc_dmat = pa->pa_dmat;
   1787 
   1788 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1789 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1790 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1791 
   1792 	sc->sc_type = wmp->wmp_type;
   1793 
   1794 	/* Set default function pointers */
   1795 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1796 	sc->phy.release = sc->nvm.release = wm_put_null;
   1797 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1798 
   1799 	if (sc->sc_type < WM_T_82543) {
   1800 		if (sc->sc_rev < 2) {
   1801 			aprint_error_dev(sc->sc_dev,
   1802 			    "i82542 must be at least rev. 2\n");
   1803 			return;
   1804 		}
   1805 		if (sc->sc_rev < 3)
   1806 			sc->sc_type = WM_T_82542_2_0;
   1807 	}
   1808 
   1809 	/*
   1810 	 * Disable MSI for Errata:
   1811 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1812 	 *
   1813 	 *  82544: Errata 25
   1814 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1815 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1816 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1817 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1818 	 *
   1819 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1820 	 *
   1821 	 *  82571 & 82572: Errata 63
   1822 	 */
   1823 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1824 	    || (sc->sc_type == WM_T_82572))
   1825 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1826 
   1827 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1828 	    || (sc->sc_type == WM_T_82580)
   1829 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1830 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1831 		sc->sc_flags |= WM_F_NEWQUEUE;
   1832 
   1833 	/* Set device properties (mactype) */
   1834 	dict = device_properties(sc->sc_dev);
   1835 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1836 
   1837 	/*
    1838 	 * Map the device.  All devices support memory-mapped access,
   1839 	 * and it is really required for normal operation.
   1840 	 */
   1841 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1842 	switch (memtype) {
   1843 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1844 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1845 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1846 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1847 		break;
   1848 	default:
   1849 		memh_valid = 0;
   1850 		break;
   1851 	}
   1852 
   1853 	if (memh_valid) {
   1854 		sc->sc_st = memt;
   1855 		sc->sc_sh = memh;
   1856 		sc->sc_ss = memsize;
   1857 	} else {
   1858 		aprint_error_dev(sc->sc_dev,
   1859 		    "unable to map device registers\n");
   1860 		return;
   1861 	}
   1862 
   1863 	/*
   1864 	 * In addition, i82544 and later support I/O mapped indirect
   1865 	 * register access.  It is not desirable (nor supported in
   1866 	 * this driver) to use it for normal operation, though it is
   1867 	 * required to work around bugs in some chip versions.
   1868 	 */
   1869 	if (sc->sc_type >= WM_T_82544) {
   1870 		/* First we have to find the I/O BAR. */
   1871 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1872 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1873 			if (memtype == PCI_MAPREG_TYPE_IO)
   1874 				break;
   1875 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1876 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1877 				i += 4;	/* skip high bits, too */
   1878 		}
   1879 		if (i < PCI_MAPREG_END) {
   1880 			/*
    1881 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1882 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1883 			 * That's no problem, because those newer chips don't
    1884 			 * have this bug.
   1885 			 *
   1886 			 * The i8254x doesn't apparently respond when the
   1887 			 * I/O BAR is 0, which looks somewhat like it's not
   1888 			 * been configured.
   1889 			 */
   1890 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1891 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1892 				aprint_error_dev(sc->sc_dev,
   1893 				    "WARNING: I/O BAR at zero.\n");
   1894 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1895 					0, &sc->sc_iot, &sc->sc_ioh,
   1896 					NULL, &sc->sc_ios) == 0) {
   1897 				sc->sc_flags |= WM_F_IOH_VALID;
   1898 			} else
   1899 				aprint_error_dev(sc->sc_dev,
   1900 				    "WARNING: unable to map I/O space\n");
   1901 		}
   1902 
   1903 	}
   1904 
   1905 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1906 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1907 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1908 	if (sc->sc_type < WM_T_82542_2_1)
   1909 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1910 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1911 
   1912 	/* power up chip */
   1913 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1914 	    && error != EOPNOTSUPP) {
   1915 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1916 		return;
   1917 	}
   1918 
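         	/*
         	 * Size the number of Tx/Rx queue pairs from the number of
         	 * MSI-X vectors the device could be allocated.
         	 */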
   1919 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1920 	/*
    1921 	 * If we can use only one queue anyway, don't use MSI-X, to save
    1922 	 * interrupt resources.
   1923 	 */
   1924 	if (sc->sc_nqueues > 1) {
   1925 		max_type = PCI_INTR_TYPE_MSIX;
   1926 		/*
    1927 		 * The 82583 advertises an MSI-X capability in its PCI
    1928 		 * configuration space, but it doesn't actually support
    1929 		 * MSI-X; at least the documentation says nothing about it.
   1930 		 */
   1931 		counts[PCI_INTR_TYPE_MSIX]
   1932 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1933 	} else {
   1934 		max_type = PCI_INTR_TYPE_MSI;
   1935 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1936 	}
   1937 
   1938 	/* Allocation settings */
   1939 	counts[PCI_INTR_TYPE_MSI] = 1;
   1940 	counts[PCI_INTR_TYPE_INTX] = 1;
   1941 	/* overridden by disable flags */
   1942 	if (wm_disable_msi != 0) {
   1943 		counts[PCI_INTR_TYPE_MSI] = 0;
   1944 		if (wm_disable_msix != 0) {
   1945 			max_type = PCI_INTR_TYPE_INTX;
   1946 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1947 		}
   1948 	} else if (wm_disable_msix != 0) {
   1949 		max_type = PCI_INTR_TYPE_MSI;
   1950 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1951 	}
   1952 
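         	/*
         	 * Interrupt allocation falls back in stages: if MSI-X setup
         	 * fails, release the vectors and retry with MSI; if MSI setup
         	 * fails, retry once more with a legacy INTx interrupt.
         	 */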
   1953 alloc_retry:
   1954 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1955 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1956 		return;
   1957 	}
   1958 
   1959 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1960 		error = wm_setup_msix(sc);
   1961 		if (error) {
   1962 			pci_intr_release(pc, sc->sc_intrs,
   1963 			    counts[PCI_INTR_TYPE_MSIX]);
   1964 
   1965 			/* Setup for MSI: Disable MSI-X */
   1966 			max_type = PCI_INTR_TYPE_MSI;
   1967 			counts[PCI_INTR_TYPE_MSI] = 1;
   1968 			counts[PCI_INTR_TYPE_INTX] = 1;
   1969 			goto alloc_retry;
   1970 		}
   1971 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1972 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1973 		error = wm_setup_legacy(sc);
   1974 		if (error) {
   1975 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1976 			    counts[PCI_INTR_TYPE_MSI]);
   1977 
   1978 			/* The next try is for INTx: Disable MSI */
   1979 			max_type = PCI_INTR_TYPE_INTX;
   1980 			counts[PCI_INTR_TYPE_INTX] = 1;
   1981 			goto alloc_retry;
   1982 		}
   1983 	} else {
   1984 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1985 		error = wm_setup_legacy(sc);
   1986 		if (error) {
   1987 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1988 			    counts[PCI_INTR_TYPE_INTX]);
   1989 			return;
   1990 		}
   1991 	}
   1992 
   1993 	/*
   1994 	 * Check the function ID (unit number of the chip).
   1995 	 */
   1996 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1997 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1998 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1999 	    || (sc->sc_type == WM_T_82580)
   2000 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2001 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2002 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2003 	else
   2004 		sc->sc_funcid = 0;
   2005 
   2006 	/*
   2007 	 * Determine a few things about the bus we're connected to.
   2008 	 */
   2009 	if (sc->sc_type < WM_T_82543) {
   2010 		/* We don't really know the bus characteristics here. */
   2011 		sc->sc_bus_speed = 33;
   2012 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2013 		/*
   2014 		 * CSA (Communication Streaming Architecture) is about as fast
    2015 		 * as a 32-bit 66MHz PCI bus.
   2016 		 */
   2017 		sc->sc_flags |= WM_F_CSA;
   2018 		sc->sc_bus_speed = 66;
   2019 		aprint_verbose_dev(sc->sc_dev,
   2020 		    "Communication Streaming Architecture\n");
   2021 		if (sc->sc_type == WM_T_82547) {
   2022 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2023 			callout_setfunc(&sc->sc_txfifo_ch,
   2024 			    wm_82547_txfifo_stall, sc);
   2025 			aprint_verbose_dev(sc->sc_dev,
   2026 			    "using 82547 Tx FIFO stall work-around\n");
   2027 		}
   2028 	} else if (sc->sc_type >= WM_T_82571) {
   2029 		sc->sc_flags |= WM_F_PCIE;
   2030 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2031 		    && (sc->sc_type != WM_T_ICH10)
   2032 		    && (sc->sc_type != WM_T_PCH)
   2033 		    && (sc->sc_type != WM_T_PCH2)
   2034 		    && (sc->sc_type != WM_T_PCH_LPT)
   2035 		    && (sc->sc_type != WM_T_PCH_SPT)
   2036 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2037 			/* ICH* and PCH* have no PCIe capability registers */
   2038 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2039 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2040 				NULL) == 0)
   2041 				aprint_error_dev(sc->sc_dev,
   2042 				    "unable to find PCIe capability\n");
   2043 		}
   2044 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2045 	} else {
   2046 		reg = CSR_READ(sc, WMREG_STATUS);
   2047 		if (reg & STATUS_BUS64)
   2048 			sc->sc_flags |= WM_F_BUS64;
   2049 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2050 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2051 
   2052 			sc->sc_flags |= WM_F_PCIX;
   2053 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2054 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2055 				aprint_error_dev(sc->sc_dev,
   2056 				    "unable to find PCIX capability\n");
   2057 			else if (sc->sc_type != WM_T_82545_3 &&
   2058 				 sc->sc_type != WM_T_82546_3) {
   2059 				/*
   2060 				 * Work around a problem caused by the BIOS
   2061 				 * setting the max memory read byte count
   2062 				 * incorrectly.
   2063 				 */
   2064 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2065 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2066 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2067 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2068 
   2069 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2070 				    PCIX_CMD_BYTECNT_SHIFT;
   2071 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2072 				    PCIX_STATUS_MAXB_SHIFT;
   2073 				if (bytecnt > maxb) {
   2074 					aprint_verbose_dev(sc->sc_dev,
   2075 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2076 					    512 << bytecnt, 512 << maxb);
   2077 					pcix_cmd = (pcix_cmd &
   2078 					    ~PCIX_CMD_BYTECNT_MASK) |
   2079 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2080 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2081 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2082 					    pcix_cmd);
   2083 				}
   2084 			}
   2085 		}
   2086 		/*
   2087 		 * The quad port adapter is special; it has a PCIX-PCIX
   2088 		 * bridge on the board, and can run the secondary bus at
   2089 		 * a higher speed.
   2090 		 */
   2091 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2092 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2093 								      : 66;
   2094 		} else if (sc->sc_flags & WM_F_PCIX) {
   2095 			switch (reg & STATUS_PCIXSPD_MASK) {
   2096 			case STATUS_PCIXSPD_50_66:
   2097 				sc->sc_bus_speed = 66;
   2098 				break;
   2099 			case STATUS_PCIXSPD_66_100:
   2100 				sc->sc_bus_speed = 100;
   2101 				break;
   2102 			case STATUS_PCIXSPD_100_133:
   2103 				sc->sc_bus_speed = 133;
   2104 				break;
   2105 			default:
   2106 				aprint_error_dev(sc->sc_dev,
   2107 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2108 				    reg & STATUS_PCIXSPD_MASK);
   2109 				sc->sc_bus_speed = 66;
   2110 				break;
   2111 			}
   2112 		} else
   2113 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2114 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2115 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2116 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2117 	}
   2118 
   2119 	/* clear interesting stat counters */
   2120 	CSR_READ(sc, WMREG_COLC);
   2121 	CSR_READ(sc, WMREG_RXERRC);
   2122 
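         	/* Allocate the PHY/NVM serialization mutexes on chips that
         	 * need them. */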
   2123 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2124 	    || (sc->sc_type >= WM_T_ICH8))
   2125 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2126 	if (sc->sc_type >= WM_T_ICH8)
   2127 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2128 
   2129 	/* Set PHY, NVM mutex related stuff */
   2130 	switch (sc->sc_type) {
   2131 	case WM_T_82542_2_0:
   2132 	case WM_T_82542_2_1:
   2133 	case WM_T_82543:
   2134 	case WM_T_82544:
   2135 		/* Microwire */
   2136 		sc->nvm.read = wm_nvm_read_uwire;
   2137 		sc->sc_nvm_wordsize = 64;
   2138 		sc->sc_nvm_addrbits = 6;
   2139 		break;
   2140 	case WM_T_82540:
   2141 	case WM_T_82545:
   2142 	case WM_T_82545_3:
   2143 	case WM_T_82546:
   2144 	case WM_T_82546_3:
   2145 		/* Microwire */
   2146 		sc->nvm.read = wm_nvm_read_uwire;
   2147 		reg = CSR_READ(sc, WMREG_EECD);
   2148 		if (reg & EECD_EE_SIZE) {
   2149 			sc->sc_nvm_wordsize = 256;
   2150 			sc->sc_nvm_addrbits = 8;
   2151 		} else {
   2152 			sc->sc_nvm_wordsize = 64;
   2153 			sc->sc_nvm_addrbits = 6;
   2154 		}
   2155 		sc->sc_flags |= WM_F_LOCK_EECD;
   2156 		sc->nvm.acquire = wm_get_eecd;
   2157 		sc->nvm.release = wm_put_eecd;
   2158 		break;
   2159 	case WM_T_82541:
   2160 	case WM_T_82541_2:
   2161 	case WM_T_82547:
   2162 	case WM_T_82547_2:
   2163 		reg = CSR_READ(sc, WMREG_EECD);
   2164 		/*
    2165 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2166 		 * 8254[17], so set the flags and functions before calling it.
   2167 		 */
   2168 		sc->sc_flags |= WM_F_LOCK_EECD;
   2169 		sc->nvm.acquire = wm_get_eecd;
   2170 		sc->nvm.release = wm_put_eecd;
   2171 		if (reg & EECD_EE_TYPE) {
   2172 			/* SPI */
   2173 			sc->nvm.read = wm_nvm_read_spi;
   2174 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2175 			wm_nvm_set_addrbits_size_eecd(sc);
   2176 		} else {
   2177 			/* Microwire */
   2178 			sc->nvm.read = wm_nvm_read_uwire;
   2179 			if ((reg & EECD_EE_ABITS) != 0) {
   2180 				sc->sc_nvm_wordsize = 256;
   2181 				sc->sc_nvm_addrbits = 8;
   2182 			} else {
   2183 				sc->sc_nvm_wordsize = 64;
   2184 				sc->sc_nvm_addrbits = 6;
   2185 			}
   2186 		}
   2187 		break;
   2188 	case WM_T_82571:
   2189 	case WM_T_82572:
   2190 		/* SPI */
   2191 		sc->nvm.read = wm_nvm_read_eerd;
    2192 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2193 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2194 		wm_nvm_set_addrbits_size_eecd(sc);
   2195 		sc->phy.acquire = wm_get_swsm_semaphore;
   2196 		sc->phy.release = wm_put_swsm_semaphore;
   2197 		sc->nvm.acquire = wm_get_nvm_82571;
   2198 		sc->nvm.release = wm_put_nvm_82571;
   2199 		break;
   2200 	case WM_T_82573:
   2201 	case WM_T_82574:
   2202 	case WM_T_82583:
   2203 		sc->nvm.read = wm_nvm_read_eerd;
    2204 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2205 		if (sc->sc_type == WM_T_82573) {
   2206 			sc->phy.acquire = wm_get_swsm_semaphore;
   2207 			sc->phy.release = wm_put_swsm_semaphore;
   2208 			sc->nvm.acquire = wm_get_nvm_82571;
   2209 			sc->nvm.release = wm_put_nvm_82571;
   2210 		} else {
   2211 			/* Both PHY and NVM use the same semaphore. */
   2212 			sc->phy.acquire = sc->nvm.acquire
   2213 			    = wm_get_swfwhw_semaphore;
   2214 			sc->phy.release = sc->nvm.release
   2215 			    = wm_put_swfwhw_semaphore;
   2216 		}
   2217 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2218 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2219 			sc->sc_nvm_wordsize = 2048;
   2220 		} else {
   2221 			/* SPI */
   2222 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2223 			wm_nvm_set_addrbits_size_eecd(sc);
   2224 		}
   2225 		break;
   2226 	case WM_T_82575:
   2227 	case WM_T_82576:
   2228 	case WM_T_82580:
   2229 	case WM_T_I350:
   2230 	case WM_T_I354:
   2231 	case WM_T_80003:
   2232 		/* SPI */
   2233 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2234 		wm_nvm_set_addrbits_size_eecd(sc);
   2235 		if ((sc->sc_type == WM_T_80003)
   2236 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2237 			sc->nvm.read = wm_nvm_read_eerd;
   2238 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2239 		} else {
   2240 			sc->nvm.read = wm_nvm_read_spi;
   2241 			sc->sc_flags |= WM_F_LOCK_EECD;
   2242 		}
   2243 		sc->phy.acquire = wm_get_phy_82575;
   2244 		sc->phy.release = wm_put_phy_82575;
   2245 		sc->nvm.acquire = wm_get_nvm_80003;
   2246 		sc->nvm.release = wm_put_nvm_80003;
   2247 		break;
   2248 	case WM_T_ICH8:
   2249 	case WM_T_ICH9:
   2250 	case WM_T_ICH10:
   2251 	case WM_T_PCH:
   2252 	case WM_T_PCH2:
   2253 	case WM_T_PCH_LPT:
   2254 		sc->nvm.read = wm_nvm_read_ich8;
   2255 		/* FLASH */
   2256 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2257 		sc->sc_nvm_wordsize = 2048;
   2258 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2259 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2260 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2261 			aprint_error_dev(sc->sc_dev,
   2262 			    "can't map FLASH registers\n");
   2263 			goto out;
   2264 		}
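         		/*
         		 * GFPREG holds the first and last sectors of the flash
         		 * region; from those, compute the base offset in bytes
         		 * and the size of one of the two NVM banks in 16-bit
         		 * words.
         		 */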
   2265 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2266 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2267 		    ICH_FLASH_SECTOR_SIZE;
   2268 		sc->sc_ich8_flash_bank_size =
   2269 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2270 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2271 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2272 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2273 		sc->sc_flashreg_offset = 0;
   2274 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2275 		sc->phy.release = wm_put_swflag_ich8lan;
   2276 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2277 		sc->nvm.release = wm_put_nvm_ich8lan;
   2278 		break;
   2279 	case WM_T_PCH_SPT:
   2280 	case WM_T_PCH_CNP:
   2281 		sc->nvm.read = wm_nvm_read_spt;
   2282 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2283 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2284 		sc->sc_flasht = sc->sc_st;
   2285 		sc->sc_flashh = sc->sc_sh;
   2286 		sc->sc_ich8_flash_base = 0;
   2287 		sc->sc_nvm_wordsize =
   2288 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2289 		    * NVM_SIZE_MULTIPLIER;
    2290 		/* That is the size in bytes; we want words. */
   2291 		sc->sc_nvm_wordsize /= 2;
   2292 		/* assume 2 banks */
   2293 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2294 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2295 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2296 		sc->phy.release = wm_put_swflag_ich8lan;
   2297 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2298 		sc->nvm.release = wm_put_nvm_ich8lan;
   2299 		break;
   2300 	case WM_T_I210:
   2301 	case WM_T_I211:
    2302 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2303 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2304 		if (wm_nvm_flash_presence_i210(sc)) {
   2305 			sc->nvm.read = wm_nvm_read_eerd;
   2306 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2307 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2308 			wm_nvm_set_addrbits_size_eecd(sc);
   2309 		} else {
   2310 			sc->nvm.read = wm_nvm_read_invm;
   2311 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2312 			sc->sc_nvm_wordsize = INVM_SIZE;
   2313 		}
   2314 		sc->phy.acquire = wm_get_phy_82575;
   2315 		sc->phy.release = wm_put_phy_82575;
   2316 		sc->nvm.acquire = wm_get_nvm_80003;
   2317 		sc->nvm.release = wm_put_nvm_80003;
   2318 		break;
   2319 	default:
   2320 		break;
   2321 	}
   2322 
   2323 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2324 	switch (sc->sc_type) {
   2325 	case WM_T_82571:
   2326 	case WM_T_82572:
   2327 		reg = CSR_READ(sc, WMREG_SWSM2);
   2328 		if ((reg & SWSM2_LOCK) == 0) {
   2329 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2330 			force_clear_smbi = true;
   2331 		} else
   2332 			force_clear_smbi = false;
   2333 		break;
   2334 	case WM_T_82573:
   2335 	case WM_T_82574:
   2336 	case WM_T_82583:
   2337 		force_clear_smbi = true;
   2338 		break;
   2339 	default:
   2340 		force_clear_smbi = false;
   2341 		break;
   2342 	}
   2343 	if (force_clear_smbi) {
   2344 		reg = CSR_READ(sc, WMREG_SWSM);
   2345 		if ((reg & SWSM_SMBI) != 0)
   2346 			aprint_error_dev(sc->sc_dev,
   2347 			    "Please update the Bootagent\n");
   2348 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2349 	}
   2350 
   2351 	/*
    2352 	 * Defer printing the EEPROM type until after verifying the checksum.
   2353 	 * This allows the EEPROM type to be printed correctly in the case
   2354 	 * that no EEPROM is attached.
   2355 	 */
   2356 	/*
   2357 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2358 	 * this for later, so we can fail future reads from the EEPROM.
   2359 	 */
   2360 	if (wm_nvm_validate_checksum(sc)) {
   2361 		/*
    2362 		 * Validate it a second time, because some PCI-e parts fail
    2363 		 * the first check while the link is in a sleep state.
   2364 		 */
   2365 		if (wm_nvm_validate_checksum(sc))
   2366 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2367 	}
   2368 
   2369 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2370 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2371 	else {
   2372 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2373 		    sc->sc_nvm_wordsize);
   2374 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2375 			aprint_verbose("iNVM");
   2376 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2377 			aprint_verbose("FLASH(HW)");
   2378 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2379 			aprint_verbose("FLASH");
   2380 		else {
   2381 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2382 				eetype = "SPI";
   2383 			else
   2384 				eetype = "MicroWire";
   2385 			aprint_verbose("(%d address bits) %s EEPROM",
   2386 			    sc->sc_nvm_addrbits, eetype);
   2387 		}
   2388 	}
   2389 	wm_nvm_version(sc);
   2390 	aprint_verbose("\n");
   2391 
   2392 	/*
    2393 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
    2394 	 * might be incorrect.
   2395 	 */
   2396 	wm_gmii_setup_phytype(sc, 0, 0);
   2397 
   2398 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2399 	switch (sc->sc_type) {
   2400 	case WM_T_ICH8:
   2401 	case WM_T_ICH9:
   2402 	case WM_T_ICH10:
   2403 	case WM_T_PCH:
   2404 	case WM_T_PCH2:
   2405 	case WM_T_PCH_LPT:
   2406 	case WM_T_PCH_SPT:
   2407 	case WM_T_PCH_CNP:
   2408 		apme_mask = WUC_APME;
   2409 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2410 		if ((eeprom_data & apme_mask) != 0)
   2411 			sc->sc_flags |= WM_F_WOL;
   2412 		break;
   2413 	default:
   2414 		break;
   2415 	}
   2416 
   2417 	/* Reset the chip to a known state. */
   2418 	wm_reset(sc);
   2419 
   2420 	/*
   2421 	 * Check for I21[01] PLL workaround.
   2422 	 *
   2423 	 * Three cases:
   2424 	 * a) Chip is I211.
   2425 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2426 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2427 	 */
   2428 	if (sc->sc_type == WM_T_I211)
   2429 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2430 	if (sc->sc_type == WM_T_I210) {
   2431 		if (!wm_nvm_flash_presence_i210(sc))
   2432 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2433 		else if ((sc->sc_nvm_ver_major < 3)
   2434 		    || ((sc->sc_nvm_ver_major == 3)
   2435 			&& (sc->sc_nvm_ver_minor < 25))) {
   2436 			aprint_verbose_dev(sc->sc_dev,
   2437 			    "ROM image version %d.%d is older than 3.25\n",
   2438 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2439 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2440 		}
   2441 	}
   2442 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2443 		wm_pll_workaround_i210(sc);
   2444 
   2445 	wm_get_wakeup(sc);
   2446 
   2447 	/* Non-AMT based hardware can now take control from firmware */
   2448 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2449 		wm_get_hw_control(sc);
   2450 
   2451 	/*
    2452 	 * Read the Ethernet address from the EEPROM, unless it was
    2453 	 * found first in the device properties.
   2454 	 */
   2455 	ea = prop_dictionary_get(dict, "mac-address");
   2456 	if (ea != NULL) {
   2457 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2458 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2459 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2460 	} else {
   2461 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2462 			aprint_error_dev(sc->sc_dev,
   2463 			    "unable to read Ethernet address\n");
   2464 			goto out;
   2465 		}
   2466 	}
   2467 
   2468 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2469 	    ether_sprintf(enaddr));
   2470 
   2471 	/*
   2472 	 * Read the config info from the EEPROM, and set up various
   2473 	 * bits in the control registers based on their contents.
   2474 	 */
   2475 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2476 	if (pn != NULL) {
   2477 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2478 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2479 	} else {
   2480 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2481 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2482 			goto out;
   2483 		}
   2484 	}
   2485 
   2486 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2487 	if (pn != NULL) {
   2488 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2489 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2490 	} else {
   2491 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2492 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2493 			goto out;
   2494 		}
   2495 	}
   2496 
   2497 	/* check for WM_F_WOL */
   2498 	switch (sc->sc_type) {
   2499 	case WM_T_82542_2_0:
   2500 	case WM_T_82542_2_1:
   2501 	case WM_T_82543:
   2502 		/* dummy? */
   2503 		eeprom_data = 0;
   2504 		apme_mask = NVM_CFG3_APME;
   2505 		break;
   2506 	case WM_T_82544:
   2507 		apme_mask = NVM_CFG2_82544_APM_EN;
   2508 		eeprom_data = cfg2;
   2509 		break;
   2510 	case WM_T_82546:
   2511 	case WM_T_82546_3:
   2512 	case WM_T_82571:
   2513 	case WM_T_82572:
   2514 	case WM_T_82573:
   2515 	case WM_T_82574:
   2516 	case WM_T_82583:
   2517 	case WM_T_80003:
   2518 	case WM_T_82575:
   2519 	case WM_T_82576:
   2520 		apme_mask = NVM_CFG3_APME;
   2521 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2522 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2523 		break;
   2524 	case WM_T_82580:
   2525 	case WM_T_I350:
   2526 	case WM_T_I354:
   2527 	case WM_T_I210:
   2528 	case WM_T_I211:
   2529 		apme_mask = NVM_CFG3_APME;
   2530 		wm_nvm_read(sc,
   2531 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2532 		    1, &eeprom_data);
   2533 		break;
   2534 	case WM_T_ICH8:
   2535 	case WM_T_ICH9:
   2536 	case WM_T_ICH10:
   2537 	case WM_T_PCH:
   2538 	case WM_T_PCH2:
   2539 	case WM_T_PCH_LPT:
   2540 	case WM_T_PCH_SPT:
   2541 	case WM_T_PCH_CNP:
    2542 		/* Already checked before wm_reset() */
   2543 		apme_mask = eeprom_data = 0;
   2544 		break;
   2545 	default: /* XXX 82540 */
   2546 		apme_mask = NVM_CFG3_APME;
   2547 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2548 		break;
   2549 	}
    2550 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2551 	if ((eeprom_data & apme_mask) != 0)
   2552 		sc->sc_flags |= WM_F_WOL;
   2553 
   2554 	/*
    2555 	 * We have the EEPROM settings; now apply the special cases
    2556 	 * where the EEPROM may be wrong or the board won't support
    2557 	 * wake-on-LAN on a particular port.
   2558 	 */
   2559 	switch (sc->sc_pcidevid) {
   2560 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2561 		sc->sc_flags &= ~WM_F_WOL;
   2562 		break;
   2563 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2564 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2565 		/* Wake events only supported on port A for dual fiber
   2566 		 * regardless of eeprom setting */
   2567 		if (sc->sc_funcid == 1)
   2568 			sc->sc_flags &= ~WM_F_WOL;
   2569 		break;
   2570 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2571 		/* if quad port adapter, disable WoL on all but port A */
   2572 		if (sc->sc_funcid != 0)
   2573 			sc->sc_flags &= ~WM_F_WOL;
   2574 		break;
   2575 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2576 		/* Wake events only supported on port A for dual fiber
   2577 		 * regardless of eeprom setting */
   2578 		if (sc->sc_funcid == 1)
   2579 			sc->sc_flags &= ~WM_F_WOL;
   2580 		break;
   2581 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2582 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2583 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2584 		/* if quad port adapter, disable WoL on all but port A */
   2585 		if (sc->sc_funcid != 0)
   2586 			sc->sc_flags &= ~WM_F_WOL;
   2587 		break;
   2588 	}
   2589 
   2590 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2591 		/* Check NVM for autonegotiation */
   2592 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2593 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2594 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2595 		}
   2596 	}
   2597 
   2598 	/*
    2599 	 * XXX Need special handling for some multiple-port cards
    2600 	 * to disable a particular port.
   2601 	 */
   2602 
   2603 	if (sc->sc_type >= WM_T_82544) {
   2604 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2605 		if (pn != NULL) {
   2606 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2607 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2608 		} else {
   2609 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2610 				aprint_error_dev(sc->sc_dev,
   2611 				    "unable to read SWDPIN\n");
   2612 				goto out;
   2613 			}
   2614 		}
   2615 	}
   2616 
   2617 	if (cfg1 & NVM_CFG1_ILOS)
   2618 		sc->sc_ctrl |= CTRL_ILOS;
   2619 
   2620 	/*
   2621 	 * XXX
    2622 	 * This code isn't correct because pins 2 and 3 are located
    2623 	 * at different positions on newer chips. Check all the datasheets.
    2624 	 *
    2625 	 * Until this problem is resolved, only do this on chips <= 82580.
   2626 	 */
   2627 	if (sc->sc_type <= WM_T_82580) {
   2628 		if (sc->sc_type >= WM_T_82544) {
   2629 			sc->sc_ctrl |=
   2630 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2631 			    CTRL_SWDPIO_SHIFT;
   2632 			sc->sc_ctrl |=
   2633 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2634 			    CTRL_SWDPINS_SHIFT;
   2635 		} else {
   2636 			sc->sc_ctrl |=
   2637 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2638 			    CTRL_SWDPIO_SHIFT;
   2639 		}
   2640 	}
   2641 
   2642 	/* XXX For other than 82580? */
   2643 	if (sc->sc_type == WM_T_82580) {
   2644 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2645 		if (nvmword & __BIT(13))
   2646 			sc->sc_ctrl |= CTRL_ILOS;
   2647 	}
   2648 
   2649 #if 0
   2650 	if (sc->sc_type >= WM_T_82544) {
   2651 		if (cfg1 & NVM_CFG1_IPS0)
   2652 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2653 		if (cfg1 & NVM_CFG1_IPS1)
   2654 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2655 		sc->sc_ctrl_ext |=
   2656 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2657 		    CTRL_EXT_SWDPIO_SHIFT;
   2658 		sc->sc_ctrl_ext |=
   2659 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2660 		    CTRL_EXT_SWDPINS_SHIFT;
   2661 	} else {
   2662 		sc->sc_ctrl_ext |=
   2663 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2664 		    CTRL_EXT_SWDPIO_SHIFT;
   2665 	}
   2666 #endif
   2667 
   2668 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2669 #if 0
   2670 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2671 #endif
   2672 
   2673 	if (sc->sc_type == WM_T_PCH) {
   2674 		uint16_t val;
   2675 
   2676 		/* Save the NVM K1 bit setting */
   2677 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2678 
   2679 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2680 			sc->sc_nvm_k1_enabled = 1;
   2681 		else
   2682 			sc->sc_nvm_k1_enabled = 0;
   2683 	}
   2684 
    2685 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2686 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2687 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2688 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2689 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2690 	    || sc->sc_type == WM_T_82573
   2691 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2692 		/* Copper only */
   2693 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2694 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2695 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2696 	    || (sc->sc_type == WM_T_I211)) {
   2697 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2698 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2699 		switch (link_mode) {
   2700 		case CTRL_EXT_LINK_MODE_1000KX:
   2701 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2702 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2703 			break;
   2704 		case CTRL_EXT_LINK_MODE_SGMII:
   2705 			if (wm_sgmii_uses_mdio(sc)) {
   2706 				aprint_verbose_dev(sc->sc_dev,
   2707 				    "SGMII(MDIO)\n");
   2708 				sc->sc_flags |= WM_F_SGMII;
   2709 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2710 				break;
   2711 			}
   2712 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2713 			/*FALLTHROUGH*/
   2714 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2715 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2716 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2717 				if (link_mode
   2718 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2719 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2720 					sc->sc_flags |= WM_F_SGMII;
   2721 				} else {
   2722 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2723 					aprint_verbose_dev(sc->sc_dev,
   2724 					    "SERDES\n");
   2725 				}
   2726 				break;
   2727 			}
   2728 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2729 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2730 
   2731 			/* Change current link mode setting */
   2732 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2733 			switch (sc->sc_mediatype) {
   2734 			case WM_MEDIATYPE_COPPER:
   2735 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2736 				break;
   2737 			case WM_MEDIATYPE_SERDES:
   2738 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2739 				break;
   2740 			default:
   2741 				break;
   2742 			}
   2743 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2744 			break;
   2745 		case CTRL_EXT_LINK_MODE_GMII:
   2746 		default:
   2747 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2748 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2749 			break;
   2750 		}
   2751 
   2752 		reg &= ~CTRL_EXT_I2C_ENA;
   2753 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2754 			reg |= CTRL_EXT_I2C_ENA;
   2755 		else
   2756 			reg &= ~CTRL_EXT_I2C_ENA;
   2757 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2758 	} else if (sc->sc_type < WM_T_82543 ||
   2759 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2760 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2761 			aprint_error_dev(sc->sc_dev,
   2762 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2763 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2764 		}
   2765 	} else {
   2766 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2767 			aprint_error_dev(sc->sc_dev,
   2768 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2769 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2770 		}
   2771 	}
   2772 
   2773 	if (sc->sc_type >= WM_T_PCH2)
   2774 		sc->sc_flags |= WM_F_EEE;
   2775 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2776 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2777 		/* XXX: Need special handling for I354. (not yet) */
   2778 		if (sc->sc_type != WM_T_I354)
   2779 			sc->sc_flags |= WM_F_EEE;
   2780 	}
   2781 
   2782 	/* Set device properties (macflags) */
   2783 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2784 
   2785 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2786 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2787 
   2788 	/* Initialize the media structures accordingly. */
   2789 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2790 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2791 	else
   2792 		wm_tbi_mediainit(sc); /* All others */
   2793 
   2794 	ifp = &sc->sc_ethercom.ec_if;
   2795 	xname = device_xname(sc->sc_dev);
   2796 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2797 	ifp->if_softc = sc;
   2798 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2799 #ifdef WM_MPSAFE
   2800 	ifp->if_extflags = IFEF_MPSAFE;
   2801 #endif
   2802 	ifp->if_ioctl = wm_ioctl;
   2803 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2804 		ifp->if_start = wm_nq_start;
   2805 		/*
    2806 		 * When there is only one CPU and the controller can use
    2807 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2808 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2809 		 * the other for link status changes.
    2810 		 * In this situation, wm_nq_transmit() is disadvantageous
    2811 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2812 		 */
   2813 		if (wm_is_using_multiqueue(sc))
   2814 			ifp->if_transmit = wm_nq_transmit;
   2815 	} else {
   2816 		ifp->if_start = wm_start;
   2817 		/*
    2818 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2819 		 */
   2820 		if (wm_is_using_multiqueue(sc))
   2821 			ifp->if_transmit = wm_transmit;
   2822 	}
    2823 	/* wm(4) does not use ifp->if_watchdog; wm_tick acts as watchdog. */
   2824 	ifp->if_init = wm_init;
   2825 	ifp->if_stop = wm_stop;
   2826 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2827 	IFQ_SET_READY(&ifp->if_snd);
   2828 
   2829 	/* Check for jumbo frame */
   2830 	switch (sc->sc_type) {
   2831 	case WM_T_82573:
   2832 		/* XXX limited to 9234 if ASPM is disabled */
   2833 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2834 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2835 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2836 		break;
   2837 	case WM_T_82571:
   2838 	case WM_T_82572:
   2839 	case WM_T_82574:
   2840 	case WM_T_82583:
   2841 	case WM_T_82575:
   2842 	case WM_T_82576:
   2843 	case WM_T_82580:
   2844 	case WM_T_I350:
   2845 	case WM_T_I354:
   2846 	case WM_T_I210:
   2847 	case WM_T_I211:
   2848 	case WM_T_80003:
   2849 	case WM_T_ICH9:
   2850 	case WM_T_ICH10:
   2851 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2852 	case WM_T_PCH_LPT:
   2853 	case WM_T_PCH_SPT:
   2854 	case WM_T_PCH_CNP:
   2855 		/* XXX limited to 9234 */
   2856 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2857 		break;
   2858 	case WM_T_PCH:
   2859 		/* XXX limited to 4096 */
   2860 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2861 		break;
   2862 	case WM_T_82542_2_0:
   2863 	case WM_T_82542_2_1:
   2864 	case WM_T_ICH8:
   2865 		/* No support for jumbo frame */
   2866 		break;
   2867 	default:
   2868 		/* ETHER_MAX_LEN_JUMBO */
   2869 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2870 		break;
   2871 	}
   2872 
    2873 	/* If we're an i82543 or greater, we can support VLANs. */
   2874 	if (sc->sc_type >= WM_T_82543)
   2875 		sc->sc_ethercom.ec_capabilities |=
   2876 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2877 
   2878 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2879 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2880 
   2881 	/*
    2882 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
    2883 	 * on i82543 and later.
   2884 	 */
   2885 	if (sc->sc_type >= WM_T_82543) {
   2886 		ifp->if_capabilities |=
   2887 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2888 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2889 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2890 		    IFCAP_CSUM_TCPv6_Tx |
   2891 		    IFCAP_CSUM_UDPv6_Tx;
   2892 	}
   2893 
   2894 	/*
    2895 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2896 	 *
   2897 	 *	82541GI (8086:1076) ... no
   2898 	 *	82572EI (8086:10b9) ... yes
   2899 	 */
   2900 	if (sc->sc_type >= WM_T_82571) {
   2901 		ifp->if_capabilities |=
   2902 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2903 	}
   2904 
   2905 	/*
    2906 	 * If we're an i82544 or greater (except i82547), we can do
   2907 	 * TCP segmentation offload.
   2908 	 */
   2909 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2910 		ifp->if_capabilities |= IFCAP_TSOv4;
   2911 	}
   2912 
   2913 	if (sc->sc_type >= WM_T_82571) {
   2914 		ifp->if_capabilities |= IFCAP_TSOv6;
   2915 	}
   2916 
   2917 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2918 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2919 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2920 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2921 
   2922 #ifdef WM_MPSAFE
   2923 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2924 #else
   2925 	sc->sc_core_lock = NULL;
   2926 #endif
   2927 
   2928 	/* Attach the interface. */
   2929 	error = if_initialize(ifp);
   2930 	if (error != 0) {
    2931 		aprint_error_dev(sc->sc_dev, "if_initialize failed (%d)\n",
   2932 		    error);
   2933 		return; /* Error */
   2934 	}
   2935 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2936 	ether_ifattach(ifp, enaddr);
   2937 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2938 	if_register(ifp);
   2939 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2940 	    RND_FLAG_DEFAULT);
   2941 
   2942 #ifdef WM_EVENT_COUNTERS
   2943 	/* Attach event counters. */
   2944 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2945 	    NULL, xname, "linkintr");
   2946 
   2947 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2948 	    NULL, xname, "tx_xoff");
   2949 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2950 	    NULL, xname, "tx_xon");
   2951 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2952 	    NULL, xname, "rx_xoff");
   2953 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2954 	    NULL, xname, "rx_xon");
   2955 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2956 	    NULL, xname, "rx_macctl");
   2957 #endif /* WM_EVENT_COUNTERS */
   2958 
   2959 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2960 		pmf_class_network_register(self, ifp);
   2961 	else
   2962 		aprint_error_dev(self, "couldn't establish power handler\n");
   2963 
   2964 	sc->sc_flags |= WM_F_ATTACHED;
   2965 out:
   2966 	return;
   2967 }
   2968 
   2969 /* The detach function (ca_detach) */
   2970 static int
   2971 wm_detach(device_t self, int flags __unused)
   2972 {
   2973 	struct wm_softc *sc = device_private(self);
   2974 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2975 	int i;
   2976 
   2977 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2978 		return 0;
   2979 
   2980 	/* Stop the interface. Callouts are stopped in it. */
   2981 	wm_stop(ifp, 1);
   2982 
   2983 	pmf_device_deregister(self);
   2984 
   2985 #ifdef WM_EVENT_COUNTERS
   2986 	evcnt_detach(&sc->sc_ev_linkintr);
   2987 
   2988 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2989 	evcnt_detach(&sc->sc_ev_tx_xon);
   2990 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2991 	evcnt_detach(&sc->sc_ev_rx_xon);
   2992 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2993 #endif /* WM_EVENT_COUNTERS */
   2994 
   2995 	/* Tell the firmware about the release */
   2996 	WM_CORE_LOCK(sc);
   2997 	wm_release_manageability(sc);
   2998 	wm_release_hw_control(sc);
   2999 	wm_enable_wakeup(sc);
   3000 	WM_CORE_UNLOCK(sc);
   3001 
   3002 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3003 
   3004 	/* Delete all remaining media. */
   3005 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3006 
   3007 	ether_ifdetach(ifp);
   3008 	if_detach(ifp);
   3009 	if_percpuq_destroy(sc->sc_ipq);
   3010 
   3011 	/* Unload RX dmamaps and free mbufs */
   3012 	for (i = 0; i < sc->sc_nqueues; i++) {
   3013 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3014 		mutex_enter(rxq->rxq_lock);
   3015 		wm_rxdrain(rxq);
   3016 		mutex_exit(rxq->rxq_lock);
   3017 	}
   3018 	/* Must unlock here */
   3019 
   3020 	/* Disestablish the interrupt handler */
   3021 	for (i = 0; i < sc->sc_nintrs; i++) {
   3022 		if (sc->sc_ihs[i] != NULL) {
   3023 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3024 			sc->sc_ihs[i] = NULL;
   3025 		}
   3026 	}
   3027 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3028 
   3029 	wm_free_txrx_queues(sc);
   3030 
   3031 	/* Unmap the registers */
   3032 	if (sc->sc_ss) {
   3033 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3034 		sc->sc_ss = 0;
   3035 	}
   3036 	if (sc->sc_ios) {
   3037 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3038 		sc->sc_ios = 0;
   3039 	}
   3040 	if (sc->sc_flashs) {
   3041 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3042 		sc->sc_flashs = 0;
   3043 	}
   3044 
   3045 	if (sc->sc_core_lock)
   3046 		mutex_obj_free(sc->sc_core_lock);
   3047 	if (sc->sc_ich_phymtx)
   3048 		mutex_obj_free(sc->sc_ich_phymtx);
   3049 	if (sc->sc_ich_nvmmtx)
   3050 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3051 
   3052 	return 0;
   3053 }
   3054 
   3055 static bool
   3056 wm_suspend(device_t self, const pmf_qual_t *qual)
   3057 {
   3058 	struct wm_softc *sc = device_private(self);
   3059 
   3060 	wm_release_manageability(sc);
   3061 	wm_release_hw_control(sc);
   3062 	wm_enable_wakeup(sc);
   3063 
   3064 	return true;
   3065 }
   3066 
   3067 static bool
   3068 wm_resume(device_t self, const pmf_qual_t *qual)
   3069 {
   3070 	struct wm_softc *sc = device_private(self);
   3071 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3072 	pcireg_t reg;
   3073 	char buf[256];
   3074 
   3075 	reg = CSR_READ(sc, WMREG_WUS);
   3076 	if (reg != 0) {
   3077 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3078 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3079 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3080 	}
   3081 
   3082 	if (sc->sc_type >= WM_T_PCH2)
   3083 		wm_resume_workarounds_pchlan(sc);
   3084 	if ((ifp->if_flags & IFF_UP) == 0) {
   3085 		wm_reset(sc);
   3086 		/* Non-AMT based hardware can now take control from firmware */
   3087 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3088 			wm_get_hw_control(sc);
   3089 		wm_init_manageability(sc);
   3090 	} else {
   3091 		/*
   3092 		 * We called pmf_class_network_register(), so if_init() is
   3093 		 * automatically called when IFF_UP. wm_reset(),
   3094 		 * wm_get_hw_control() and wm_init_manageability() are called
   3095 		 * via wm_init().
   3096 		 */
   3097 	}
   3098 
   3099 	return true;
   3100 }
   3101 
   3102 /*
   3103  * wm_watchdog:		[ifnet interface function]
   3104  *
   3105  *	Watchdog timer handler.
   3106  */
   3107 static void
   3108 wm_watchdog(struct ifnet *ifp)
   3109 {
   3110 	int qid;
   3111 	struct wm_softc *sc = ifp->if_softc;
    3112 	uint16_t hang_queue = 0; /* Max number of queues is 16 (82576) */
   3113 
   3114 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3115 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3116 
   3117 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3118 	}
   3119 
   3120 	/*
    3121 	 * If any of the queues hung up, reset the interface.
   3122 	 */
   3123 	if (hang_queue != 0) {
   3124 		(void) wm_init(ifp);
   3125 
   3126 		/*
    3127 		 * Some upper layer processing, e.g. ALTQ or a single-CPU
    3128 		 * system, may still call ifp->if_start().
   3129 		 */
   3130 		/* Try to get more packets going. */
   3131 		ifp->if_start(ifp);
   3132 	}
   3133 }
   3134 
   3135 
   3136 static void
   3137 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3138 {
   3139 
   3140 	mutex_enter(txq->txq_lock);
   3141 	if (txq->txq_sending &&
   3142 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3143 		wm_watchdog_txq_locked(ifp, txq, hang);
   3144 	}
   3145 	mutex_exit(txq->txq_lock);
   3146 }
   3147 
   3148 static void
   3149 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3150     uint16_t *hang)
   3151 {
   3152 	struct wm_softc *sc = ifp->if_softc;
   3153 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3154 
   3155 	KASSERT(mutex_owned(txq->txq_lock));
   3156 
   3157 	/*
   3158 	 * Since we're using delayed interrupts, sweep up
   3159 	 * before we report an error.
   3160 	 */
   3161 	wm_txeof(txq, UINT_MAX);
   3162 
   3163 	if (txq->txq_sending)
   3164 		*hang |= __BIT(wmq->wmq_id);
   3165 
   3166 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3167 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3168 		    device_xname(sc->sc_dev));
   3169 	} else {
   3170 #ifdef WM_DEBUG
   3171 		int i, j;
   3172 		struct wm_txsoft *txs;
   3173 #endif
   3174 		log(LOG_ERR,
   3175 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3176 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3177 		    txq->txq_next);
   3178 		ifp->if_oerrors++;
   3179 #ifdef WM_DEBUG
   3180 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3181 		    i = WM_NEXTTXS(txq, i)) {
   3182 		    txs = &txq->txq_soft[i];
   3183 		    printf("txs %d tx %d -> %d\n",
   3184 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3185 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3186 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3187 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3188 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3189 				    printf("\t %#08x%08x\n",
   3190 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3191 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3192 			    } else {
   3193 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3194 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3195 					txq->txq_descs[j].wtx_addr.wa_low);
   3196 				    printf("\t %#04x%02x%02x%08x\n",
   3197 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3198 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3199 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3200 					txq->txq_descs[j].wtx_cmdlen);
   3201 			    }
   3202 			if (j == txs->txs_lastdesc)
   3203 				break;
   3204 			}
   3205 		}
   3206 #endif
   3207 	}
   3208 }
   3209 
   3210 /*
   3211  * wm_tick:
   3212  *
   3213  *	One second timer, used to check link status, sweep up
   3214  *	completed transmit jobs, etc.
   3215  */
   3216 static void
   3217 wm_tick(void *arg)
   3218 {
   3219 	struct wm_softc *sc = arg;
   3220 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3221 #ifndef WM_MPSAFE
   3222 	int s = splnet();
   3223 #endif
   3224 
   3225 	WM_CORE_LOCK(sc);
   3226 
   3227 	if (sc->sc_core_stopping) {
   3228 		WM_CORE_UNLOCK(sc);
   3229 #ifndef WM_MPSAFE
   3230 		splx(s);
   3231 #endif
   3232 		return;
   3233 	}
   3234 
   3235 	if (sc->sc_type >= WM_T_82542_2_1) {
   3236 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3237 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3238 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3239 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3240 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3241 	}
   3242 
   3243 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3244 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3245 	    + CSR_READ(sc, WMREG_CRCERRS)
   3246 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3247 	    + CSR_READ(sc, WMREG_SYMERRC)
   3248 	    + CSR_READ(sc, WMREG_RXERRC)
   3249 	    + CSR_READ(sc, WMREG_SEC)
   3250 	    + CSR_READ(sc, WMREG_CEXTERR)
   3251 	    + CSR_READ(sc, WMREG_RLEC);
   3252 	/*
    3253 	 * WMREG_RNBC is incremented when no receive buffers are available
    3254 	 * in host memory. It is not the number of dropped packets, because
    3255 	 * the ethernet controller can still receive packets in that case
    3256 	 * as long as there is space in the PHY's FIFO.
    3257 	 *
    3258 	 * If you want to track WMREG_RNBC, you should use a dedicated
    3259 	 * EVCNT of your own instead of if_iqdrops.
   3260 	 */
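         	/*
         	 * For example, a driver-local counter could be attached and
         	 * updated like the other event counters in this file (the
         	 * sc_ev_rnbc field is hypothetical, not part of wm(4)):
         	 *
         	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
         	 *	    NULL, xname, "rnbc");
         	 *	...
         	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
         	 */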
   3261 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3262 
   3263 	if (sc->sc_flags & WM_F_HAS_MII)
   3264 		mii_tick(&sc->sc_mii);
   3265 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3266 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3267 		wm_serdes_tick(sc);
   3268 	else
   3269 		wm_tbi_tick(sc);
   3270 
   3271 	WM_CORE_UNLOCK(sc);
   3272 
   3273 	wm_watchdog(ifp);
   3274 
   3275 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3276 }
   3277 
   3278 static int
   3279 wm_ifflags_cb(struct ethercom *ec)
   3280 {
   3281 	struct ifnet *ifp = &ec->ec_if;
   3282 	struct wm_softc *sc = ifp->if_softc;
   3283 	int iffchange, ecchange;
   3284 	bool needreset = false;
   3285 	int rc = 0;
   3286 
   3287 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3288 		device_xname(sc->sc_dev), __func__));
   3289 
   3290 	WM_CORE_LOCK(sc);
   3291 
   3292 	/*
    3293 	 * Check for changes in if_flags. The main purpose is to avoid
    3294 	 * a link reset (link down) when opening bpf toggles IFF_PROMISC.
   3295 	 */
   3296 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3297 	sc->sc_if_flags = ifp->if_flags;
   3298 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3299 		needreset = true;
   3300 		goto ec;
   3301 	}
   3302 
   3303 	/* iff related updates */
   3304 	if ((iffchange & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3305 		wm_set_filter(sc);
   3306 
   3307 	wm_set_vlan(sc);
   3308 
   3309 ec:
   3310 	/* Check for ec_capenable. */
   3311 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3312 	sc->sc_ec_capenable = ec->ec_capenable;
   3313 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3314 		needreset = true;
   3315 		goto out;
   3316 	}
   3317 
   3318 	/* ec related updates */
   3319 	wm_set_eee(sc);
   3320 
   3321 out:
   3322 	if (needreset)
   3323 		rc = ENETRESET;
   3324 	WM_CORE_UNLOCK(sc);
   3325 
   3326 	return rc;
   3327 }
   3328 
   3329 /*
   3330  * wm_ioctl:		[ifnet interface function]
   3331  *
   3332  *	Handle control requests from the operator.
   3333  */
   3334 static int
   3335 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3336 {
   3337 	struct wm_softc *sc = ifp->if_softc;
   3338 	struct ifreq *ifr = (struct ifreq *) data;
   3339 	struct ifaddr *ifa = (struct ifaddr *)data;
   3340 	struct sockaddr_dl *sdl;
   3341 	int s, error;
   3342 
   3343 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3344 		device_xname(sc->sc_dev), __func__));
   3345 
   3346 #ifndef WM_MPSAFE
   3347 	s = splnet();
   3348 #endif
   3349 	switch (cmd) {
   3350 	case SIOCSIFMEDIA:
   3351 	case SIOCGIFMEDIA:
   3352 		WM_CORE_LOCK(sc);
   3353 		/* Flow control requires full-duplex mode. */
   3354 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3355 		    (ifr->ifr_media & IFM_FDX) == 0)
   3356 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3357 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3358 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3359 				/* We can do both TXPAUSE and RXPAUSE. */
   3360 				ifr->ifr_media |=
   3361 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3362 			}
   3363 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3364 		}
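         		/*
         		 * For example, a request like "ifconfig wm0 media
         		 * 1000baseT mediaopt full-duplex,flowcontrol" carries
         		 * IFM_FLOW, which the code above expands into both
         		 * IFM_ETH_TXPAUSE and IFM_ETH_RXPAUSE.
         		 */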
   3365 		WM_CORE_UNLOCK(sc);
   3366 #ifdef WM_MPSAFE
   3367 		s = splnet();
   3368 #endif
   3369 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3370 #ifdef WM_MPSAFE
   3371 		splx(s);
   3372 #endif
   3373 		break;
   3374 	case SIOCINITIFADDR:
   3375 		WM_CORE_LOCK(sc);
   3376 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3377 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3378 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3379 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3380 			/* unicast address is first multicast entry */
   3381 			wm_set_filter(sc);
   3382 			error = 0;
   3383 			WM_CORE_UNLOCK(sc);
   3384 			break;
   3385 		}
   3386 		WM_CORE_UNLOCK(sc);
   3387 		/*FALLTHROUGH*/
   3388 	default:
   3389 #ifdef WM_MPSAFE
   3390 		s = splnet();
   3391 #endif
    3392 		/* It may call wm_start(), so call it without the lock */
   3393 		error = ether_ioctl(ifp, cmd, data);
   3394 #ifdef WM_MPSAFE
   3395 		splx(s);
   3396 #endif
   3397 		if (error != ENETRESET)
   3398 			break;
   3399 
   3400 		error = 0;
   3401 
   3402 		if (cmd == SIOCSIFCAP)
   3403 			error = (*ifp->if_init)(ifp);
   3404 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3405 			;
   3406 		else if (ifp->if_flags & IFF_RUNNING) {
   3407 			/*
   3408 			 * Multicast list has changed; set the hardware filter
   3409 			 * accordingly.
   3410 			 */
   3411 			WM_CORE_LOCK(sc);
   3412 			wm_set_filter(sc);
   3413 			WM_CORE_UNLOCK(sc);
   3414 		}
   3415 		break;
   3416 	}
   3417 
   3418 #ifndef WM_MPSAFE
   3419 	splx(s);
   3420 #endif
   3421 	return error;
   3422 }
   3423 
   3424 /* MAC address related */
   3425 
   3426 /*
    3427  * Get the offset of the MAC address and return it.
    3428  * If an error occurs, offset 0 is used.
   3429  */
   3430 static uint16_t
   3431 wm_check_alt_mac_addr(struct wm_softc *sc)
   3432 {
   3433 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3434 	uint16_t offset = NVM_OFF_MACADDR;
   3435 
   3436 	/* Try to read alternative MAC address pointer */
   3437 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3438 		return 0;
   3439 
    3440 	/* Check whether the pointer is valid. */
   3441 	if ((offset == 0x0000) || (offset == 0xffff))
   3442 		return 0;
   3443 
   3444 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3445 	/*
    3446 	 * Check whether the alternative MAC address is valid.
    3447 	 * Some cards have a non-0xffff pointer but don't actually use
    3448 	 * an alternative MAC address.
    3449 	 *
    3450 	 * Check that the I/G (broadcast) bit of the first octet is clear.
   3451 	 */
   3452 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3453 		if (((myea[0] & 0xff) & 0x01) == 0)
   3454 			return offset; /* Found */
   3455 
   3456 	/* Not found */
   3457 	return 0;
   3458 }
   3459 
   3460 static int
   3461 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3462 {
   3463 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3464 	uint16_t offset = NVM_OFF_MACADDR;
   3465 	int do_invert = 0;
   3466 
   3467 	switch (sc->sc_type) {
   3468 	case WM_T_82580:
   3469 	case WM_T_I350:
   3470 	case WM_T_I354:
   3471 		/* EEPROM Top Level Partitioning */
   3472 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3473 		break;
   3474 	case WM_T_82571:
   3475 	case WM_T_82575:
   3476 	case WM_T_82576:
   3477 	case WM_T_80003:
   3478 	case WM_T_I210:
   3479 	case WM_T_I211:
   3480 		offset = wm_check_alt_mac_addr(sc);
   3481 		if (offset == 0)
   3482 			if ((sc->sc_funcid & 0x01) == 1)
   3483 				do_invert = 1;
   3484 		break;
   3485 	default:
   3486 		if ((sc->sc_funcid & 0x01) == 1)
   3487 			do_invert = 1;
   3488 		break;
   3489 	}
   3490 
   3491 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3492 		goto bad;
   3493 
   3494 	enaddr[0] = myea[0] & 0xff;
   3495 	enaddr[1] = myea[0] >> 8;
   3496 	enaddr[2] = myea[1] & 0xff;
   3497 	enaddr[3] = myea[1] >> 8;
   3498 	enaddr[4] = myea[2] & 0xff;
   3499 	enaddr[5] = myea[2] >> 8;
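         	/*
         	 * For example, NVM words myea[] = { 0x1100, 0x3322, 0x5544 }
         	 * unpack to the station address 00:11:22:33:44:55; each word
         	 * holds two bytes, low byte first.
         	 */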
   3500 
   3501 	/*
   3502 	 * Toggle the LSB of the MAC address on the second port
   3503 	 * of some dual port cards.
   3504 	 */
   3505 	if (do_invert != 0)
   3506 		enaddr[5] ^= 1;
   3507 
   3508 	return 0;
   3509 
   3510  bad:
   3511 	return -1;
   3512 }
   3513 
   3514 /*
   3515  * wm_set_ral:
   3516  *
    3517  *	Set an entry in the receive address list.
   3518  */
   3519 static void
   3520 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3521 {
   3522 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3523 	uint32_t wlock_mac;
   3524 	int rv;
   3525 
   3526 	if (enaddr != NULL) {
   3527 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3528 		    (enaddr[3] << 24);
   3529 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3530 		ral_hi |= RAL_AV;
   3531 	} else {
   3532 		ral_lo = 0;
   3533 		ral_hi = 0;
   3534 	}
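         	/*
         	 * For example, enaddr 00:11:22:33:44:55 packs into
         	 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV,
         	 * where RAL_AV marks the entry as valid.
         	 */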
   3535 
   3536 	switch (sc->sc_type) {
   3537 	case WM_T_82542_2_0:
   3538 	case WM_T_82542_2_1:
   3539 	case WM_T_82543:
   3540 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3541 		CSR_WRITE_FLUSH(sc);
   3542 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3543 		CSR_WRITE_FLUSH(sc);
   3544 		break;
   3545 	case WM_T_PCH2:
   3546 	case WM_T_PCH_LPT:
   3547 	case WM_T_PCH_SPT:
   3548 	case WM_T_PCH_CNP:
   3549 		if (idx == 0) {
   3550 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3551 			CSR_WRITE_FLUSH(sc);
   3552 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3553 			CSR_WRITE_FLUSH(sc);
   3554 			return;
   3555 		}
   3556 		if (sc->sc_type != WM_T_PCH2) {
   3557 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3558 			    FWSM_WLOCK_MAC);
   3559 			addrl = WMREG_SHRAL(idx - 1);
   3560 			addrh = WMREG_SHRAH(idx - 1);
   3561 		} else {
   3562 			wlock_mac = 0;
   3563 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3564 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3565 		}
   3566 
   3567 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3568 			rv = wm_get_swflag_ich8lan(sc);
   3569 			if (rv != 0)
   3570 				return;
   3571 			CSR_WRITE(sc, addrl, ral_lo);
   3572 			CSR_WRITE_FLUSH(sc);
   3573 			CSR_WRITE(sc, addrh, ral_hi);
   3574 			CSR_WRITE_FLUSH(sc);
   3575 			wm_put_swflag_ich8lan(sc);
   3576 		}
   3577 
   3578 		break;
   3579 	default:
   3580 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3581 		CSR_WRITE_FLUSH(sc);
   3582 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3583 		CSR_WRITE_FLUSH(sc);
   3584 		break;
   3585 	}
   3586 }
   3587 
   3588 /*
   3589  * wm_mchash:
   3590  *
   3591  *	Compute the hash of the multicast address for the 4096-bit
   3592  *	multicast filter.
   3593  */
   3594 static uint32_t
   3595 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3596 {
   3597 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3598 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3599 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3600 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3601 	uint32_t hash;
   3602 
   3603 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3604 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3605 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3606 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3607 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3608 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3609 		return (hash & 0x3ff);
   3610 	}
   3611 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3612 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3613 
   3614 	return (hash & 0xfff);
   3615 }
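
         /*
          * Worked example: the IPv4 all-hosts group 224.0.0.1 maps to the
          * multicast MAC address 01:00:5e:00:00:01, so enaddr[4] = 0x00 and
          * enaddr[5] = 0x01.  With filter type 0 (lo_shift = 4, hi_shift = 4):
          *
          *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010
          *
          * wm_set_filter() then sets bit (hash & 0x1f) = 16 in MTA register
          * (hash >> 5) = 0.
          */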
   3616 
   3617 /*
    3618  * wm_rar_count:
    3619  *	Return the number of entries in the receive address list.
   3620  */
   3621 static int
   3622 wm_rar_count(struct wm_softc *sc)
   3623 {
   3624 	int size;
   3625 
   3626 	switch (sc->sc_type) {
   3627 	case WM_T_ICH8:
    3628 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3629 		break;
   3630 	case WM_T_ICH9:
   3631 	case WM_T_ICH10:
   3632 	case WM_T_PCH:
   3633 		size = WM_RAL_TABSIZE_ICH8;
   3634 		break;
   3635 	case WM_T_PCH2:
   3636 		size = WM_RAL_TABSIZE_PCH2;
   3637 		break;
   3638 	case WM_T_PCH_LPT:
   3639 	case WM_T_PCH_SPT:
   3640 	case WM_T_PCH_CNP:
   3641 		size = WM_RAL_TABSIZE_PCH_LPT;
   3642 		break;
   3643 	case WM_T_82575:
   3644 	case WM_T_I210:
   3645 	case WM_T_I211:
   3646 		size = WM_RAL_TABSIZE_82575;
   3647 		break;
   3648 	case WM_T_82576:
   3649 	case WM_T_82580:
   3650 		size = WM_RAL_TABSIZE_82576;
   3651 		break;
   3652 	case WM_T_I350:
   3653 	case WM_T_I354:
   3654 		size = WM_RAL_TABSIZE_I350;
   3655 		break;
   3656 	default:
   3657 		size = WM_RAL_TABSIZE;
   3658 	}
   3659 
   3660 	return size;
   3661 }
   3662 
   3663 /*
   3664  * wm_set_filter:
   3665  *
   3666  *	Set up the receive filter.
   3667  */
   3668 static void
   3669 wm_set_filter(struct wm_softc *sc)
   3670 {
   3671 	struct ethercom *ec = &sc->sc_ethercom;
   3672 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3673 	struct ether_multi *enm;
   3674 	struct ether_multistep step;
   3675 	bus_addr_t mta_reg;
   3676 	uint32_t hash, reg, bit;
   3677 	int i, size, ralmax;
   3678 
   3679 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3680 		device_xname(sc->sc_dev), __func__));
   3681 
   3682 	if (sc->sc_type >= WM_T_82544)
   3683 		mta_reg = WMREG_CORDOVA_MTA;
   3684 	else
   3685 		mta_reg = WMREG_MTA;
   3686 
   3687 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3688 
   3689 	if (ifp->if_flags & IFF_BROADCAST)
   3690 		sc->sc_rctl |= RCTL_BAM;
   3691 	if (ifp->if_flags & IFF_PROMISC) {
   3692 		sc->sc_rctl |= RCTL_UPE;
   3693 		goto allmulti;
   3694 	}
   3695 
   3696 	/*
   3697 	 * Set the station address in the first RAL slot, and
   3698 	 * clear the remaining slots.
   3699 	 */
   3700 	size = wm_rar_count(sc);
   3701 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3702 
   3703 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3704 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3705 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
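         		/*
         		 * FWSM_WLOCK_MAC encodes how many receive address
         		 * entries firmware allows software to use: 0 means
         		 * all of them, 1 means only RAR[0], and any other
         		 * value n means RAR[0] plus n SHRA entries.
         		 */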
   3706 		switch (i) {
   3707 		case 0:
   3708 			/* We can use all entries */
   3709 			ralmax = size;
   3710 			break;
   3711 		case 1:
   3712 			/* Only RAR[0] */
   3713 			ralmax = 1;
   3714 			break;
   3715 		default:
   3716 			/* available SHRA + RAR[0] */
   3717 			ralmax = i + 1;
   3718 		}
   3719 	} else
   3720 		ralmax = size;
   3721 	for (i = 1; i < size; i++) {
   3722 		if (i < ralmax)
   3723 			wm_set_ral(sc, NULL, i);
   3724 	}
   3725 
   3726 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3727 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3728 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3729 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3730 		size = WM_ICH8_MC_TABSIZE;
   3731 	else
   3732 		size = WM_MC_TABSIZE;
   3733 	/* Clear out the multicast table. */
   3734 	for (i = 0; i < size; i++) {
   3735 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3736 		CSR_WRITE_FLUSH(sc);
   3737 	}
   3738 
   3739 	ETHER_LOCK(ec);
   3740 	ETHER_FIRST_MULTI(step, ec, enm);
   3741 	while (enm != NULL) {
   3742 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3743 			ETHER_UNLOCK(ec);
   3744 			/*
   3745 			 * We must listen to a range of multicast addresses.
   3746 			 * For now, just accept all multicasts, rather than
   3747 			 * trying to set only those filter bits needed to match
   3748 			 * the range.  (At this time, the only use of address
   3749 			 * ranges is for IP multicast routing, for which the
   3750 			 * range is big enough to require all bits set.)
   3751 			 */
   3752 			goto allmulti;
   3753 		}
   3754 
   3755 		hash = wm_mchash(sc, enm->enm_addrlo);
   3756 
   3757 		reg = (hash >> 5);
   3758 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3759 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3760 		    || (sc->sc_type == WM_T_PCH2)
   3761 		    || (sc->sc_type == WM_T_PCH_LPT)
   3762 		    || (sc->sc_type == WM_T_PCH_SPT)
   3763 		    || (sc->sc_type == WM_T_PCH_CNP))
   3764 			reg &= 0x1f;
   3765 		else
   3766 			reg &= 0x7f;
   3767 		bit = hash & 0x1f;
   3768 
   3769 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3770 		hash |= 1U << bit;
   3771 
   3772 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3773 			/*
    3774 			 * 82544 Errata 9: Certain registers cannot be written
   3775 			 * with particular alignments in PCI-X bus operation
   3776 			 * (FCAH, MTA and VFTA).
   3777 			 */
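         			/*
         			 * Work around it by also rewriting the
         			 * adjacent even-numbered register: read it,
         			 * write the odd-numbered one, then write the
         			 * even one back.
         			 */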
   3778 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3779 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3780 			CSR_WRITE_FLUSH(sc);
   3781 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3782 			CSR_WRITE_FLUSH(sc);
   3783 		} else {
   3784 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3785 			CSR_WRITE_FLUSH(sc);
   3786 		}
   3787 
   3788 		ETHER_NEXT_MULTI(step, enm);
   3789 	}
   3790 	ETHER_UNLOCK(ec);
   3791 
   3792 	ifp->if_flags &= ~IFF_ALLMULTI;
   3793 	goto setit;
   3794 
   3795  allmulti:
   3796 	ifp->if_flags |= IFF_ALLMULTI;
   3797 	sc->sc_rctl |= RCTL_MPE;
   3798 
   3799  setit:
   3800 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3801 }
   3802 
   3803 /* Reset and init related */
   3804 
   3805 static void
   3806 wm_set_vlan(struct wm_softc *sc)
   3807 {
   3808 
   3809 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3810 		device_xname(sc->sc_dev), __func__));
   3811 
   3812 	/* Deal with VLAN enables. */
   3813 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3814 		sc->sc_ctrl |= CTRL_VME;
   3815 	else
   3816 		sc->sc_ctrl &= ~CTRL_VME;
   3817 
   3818 	/* Write the control registers. */
   3819 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3820 }
   3821 
   3822 static void
   3823 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3824 {
   3825 	uint32_t gcr;
   3826 	pcireg_t ctrl2;
   3827 
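         	/*
         	 * Summary of the logic below: if a completion timeout is
         	 * already set, leave it alone.  Otherwise program the 10ms
         	 * value in GCR or, on capability-version-2 devices, set 16ms
         	 * via the PCIe Device Control 2 register.  In all cases,
         	 * completion timeout resend is disabled at the end.
         	 */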
   3828 	gcr = CSR_READ(sc, WMREG_GCR);
   3829 
    3830 	/* Only take action if the timeout value is still the default of 0 */
   3831 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3832 		goto out;
   3833 
   3834 	if ((gcr & GCR_CAP_VER2) == 0) {
   3835 		gcr |= GCR_CMPL_TMOUT_10MS;
   3836 		goto out;
   3837 	}
   3838 
   3839 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3840 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3841 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3842 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3843 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3844 
   3845 out:
   3846 	/* Disable completion timeout resend */
   3847 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3848 
   3849 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3850 }
   3851 
   3852 void
   3853 wm_get_auto_rd_done(struct wm_softc *sc)
   3854 {
   3855 	int i;
   3856 
    3857 	/* Wait for eeprom to reload */
   3858 	switch (sc->sc_type) {
   3859 	case WM_T_82571:
   3860 	case WM_T_82572:
   3861 	case WM_T_82573:
   3862 	case WM_T_82574:
   3863 	case WM_T_82583:
   3864 	case WM_T_82575:
   3865 	case WM_T_82576:
   3866 	case WM_T_82580:
   3867 	case WM_T_I350:
   3868 	case WM_T_I354:
   3869 	case WM_T_I210:
   3870 	case WM_T_I211:
   3871 	case WM_T_80003:
   3872 	case WM_T_ICH8:
   3873 	case WM_T_ICH9:
   3874 		for (i = 0; i < 10; i++) {
   3875 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3876 				break;
   3877 			delay(1000);
   3878 		}
   3879 		if (i == 10) {
   3880 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3881 			    "complete\n", device_xname(sc->sc_dev));
   3882 		}
   3883 		break;
   3884 	default:
   3885 		break;
   3886 	}
   3887 }
   3888 
   3889 void
   3890 wm_lan_init_done(struct wm_softc *sc)
   3891 {
   3892 	uint32_t reg = 0;
   3893 	int i;
   3894 
   3895 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3896 		device_xname(sc->sc_dev), __func__));
   3897 
   3898 	/* Wait for eeprom to reload */
   3899 	switch (sc->sc_type) {
   3900 	case WM_T_ICH10:
   3901 	case WM_T_PCH:
   3902 	case WM_T_PCH2:
   3903 	case WM_T_PCH_LPT:
   3904 	case WM_T_PCH_SPT:
   3905 	case WM_T_PCH_CNP:
   3906 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3907 			reg = CSR_READ(sc, WMREG_STATUS);
   3908 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3909 				break;
   3910 			delay(100);
   3911 		}
   3912 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3913 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3914 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3915 		}
   3916 		break;
   3917 	default:
   3918 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3919 		    __func__);
   3920 		break;
   3921 	}
   3922 
   3923 	reg &= ~STATUS_LAN_INIT_DONE;
   3924 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3925 }
   3926 
   3927 void
   3928 wm_get_cfg_done(struct wm_softc *sc)
   3929 {
   3930 	int mask;
   3931 	uint32_t reg;
   3932 	int i;
   3933 
   3934 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3935 		device_xname(sc->sc_dev), __func__));
   3936 
   3937 	/* Wait for eeprom to reload */
   3938 	switch (sc->sc_type) {
   3939 	case WM_T_82542_2_0:
   3940 	case WM_T_82542_2_1:
   3941 		/* null */
   3942 		break;
   3943 	case WM_T_82543:
   3944 	case WM_T_82544:
   3945 	case WM_T_82540:
   3946 	case WM_T_82545:
   3947 	case WM_T_82545_3:
   3948 	case WM_T_82546:
   3949 	case WM_T_82546_3:
   3950 	case WM_T_82541:
   3951 	case WM_T_82541_2:
   3952 	case WM_T_82547:
   3953 	case WM_T_82547_2:
   3954 	case WM_T_82573:
   3955 	case WM_T_82574:
   3956 	case WM_T_82583:
   3957 		/* generic */
    3958 		delay(10 * 1000);
   3959 		break;
   3960 	case WM_T_80003:
   3961 	case WM_T_82571:
   3962 	case WM_T_82572:
   3963 	case WM_T_82575:
   3964 	case WM_T_82576:
   3965 	case WM_T_82580:
   3966 	case WM_T_I350:
   3967 	case WM_T_I354:
   3968 	case WM_T_I210:
   3969 	case WM_T_I211:
   3970 		if (sc->sc_type == WM_T_82571) {
   3971 			/* Only 82571 shares port 0 */
   3972 			mask = EEMNGCTL_CFGDONE_0;
   3973 		} else
   3974 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3975 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3976 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3977 				break;
   3978 			delay(1000);
   3979 		}
   3980 		if (i >= WM_PHY_CFG_TIMEOUT)
   3981 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3982 				device_xname(sc->sc_dev), __func__));
   3983 		break;
   3984 	case WM_T_ICH8:
   3985 	case WM_T_ICH9:
   3986 	case WM_T_ICH10:
   3987 	case WM_T_PCH:
   3988 	case WM_T_PCH2:
   3989 	case WM_T_PCH_LPT:
   3990 	case WM_T_PCH_SPT:
   3991 	case WM_T_PCH_CNP:
    3992 		delay(10 * 1000);
   3993 		if (sc->sc_type >= WM_T_ICH10)
   3994 			wm_lan_init_done(sc);
   3995 		else
   3996 			wm_get_auto_rd_done(sc);
   3997 
   3998 		/* Clear PHY Reset Asserted bit */
   3999 		reg = CSR_READ(sc, WMREG_STATUS);
   4000 		if ((reg & STATUS_PHYRA) != 0)
   4001 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4002 		break;
   4003 	default:
   4004 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4005 		    __func__);
   4006 		break;
   4007 	}
   4008 }
   4009 
   4010 int
   4011 wm_phy_post_reset(struct wm_softc *sc)
   4012 {
   4013 	device_t dev = sc->sc_dev;
   4014 	uint16_t reg;
   4015 	int rv = 0;
   4016 
   4017 	/* This function is only for ICH8 and newer. */
   4018 	if (sc->sc_type < WM_T_ICH8)
   4019 		return 0;
   4020 
   4021 	if (wm_phy_resetisblocked(sc)) {
   4022 		/* XXX */
   4023 		device_printf(dev, "PHY is blocked\n");
   4024 		return -1;
   4025 	}
   4026 
   4027 	/* Allow time for h/w to get to quiescent state after reset */
    4028 	delay(10 * 1000);
   4029 
   4030 	/* Perform any necessary post-reset workarounds */
   4031 	if (sc->sc_type == WM_T_PCH)
   4032 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4033 	else if (sc->sc_type == WM_T_PCH2)
   4034 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4035 	if (rv != 0)
   4036 		return rv;
   4037 
   4038 	/* Clear the host wakeup bit after lcd reset */
   4039 	if (sc->sc_type >= WM_T_PCH) {
   4040 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4041 		reg &= ~BM_WUC_HOST_WU_BIT;
   4042 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4043 	}
   4044 
   4045 	/* Configure the LCD with the extended configuration region in NVM */
   4046 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4047 		return rv;
   4048 
   4049 	/* Configure the LCD with the OEM bits in NVM */
   4050 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4051 
   4052 	if (sc->sc_type == WM_T_PCH2) {
   4053 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4054 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4055 			delay(10 * 1000);
   4056 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4057 		}
   4058 		/* Set EEE LPI Update Timer to 200usec */
   4059 		rv = sc->phy.acquire(sc);
   4060 		if (rv)
   4061 			return rv;
   4062 		rv = wm_write_emi_reg_locked(dev,
   4063 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4064 		sc->phy.release(sc);
   4065 	}
   4066 
   4067 	return rv;
   4068 }
   4069 
   4070 /* Only for PCH and newer */
   4071 static int
   4072 wm_write_smbus_addr(struct wm_softc *sc)
   4073 {
   4074 	uint32_t strap, freq;
   4075 	uint16_t phy_data;
   4076 	int rv;
   4077 
   4078 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4079 		device_xname(sc->sc_dev), __func__));
   4080 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4081 
   4082 	strap = CSR_READ(sc, WMREG_STRAP);
   4083 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4084 
   4085 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4086 	if (rv != 0)
   4087 		return -1;
   4088 
   4089 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4090 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4091 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4092 
   4093 	if (sc->sc_phytype == WMPHY_I217) {
   4094 		/* Restore SMBus frequency */
    4095 		if (freq--) {
   4096 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4097 			    | HV_SMB_ADDR_FREQ_HIGH);
   4098 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4099 			    HV_SMB_ADDR_FREQ_LOW);
   4100 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4101 			    HV_SMB_ADDR_FREQ_HIGH);
   4102 		} else
   4103 			DPRINTF(WM_DEBUG_INIT,
    4104 			    ("%s: %s: Unsupported SMB frequency in PHY\n",
   4105 				device_xname(sc->sc_dev), __func__));
   4106 	}
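         	/*
         	 * Note on the strap decoding above: a STRAP_FREQ value of 0
         	 * means the SMBus frequency is not configured (the DPRINTF
         	 * path above); otherwise (freq - 1) supplies two bits, bit 0
         	 * going to HV_SMB_ADDR_FREQ_LOW and bit 1 to
         	 * HV_SMB_ADDR_FREQ_HIGH.
         	 */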
   4107 
   4108 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4109 	    phy_data);
   4110 }
   4111 
   4112 static int
   4113 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4114 {
   4115 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4116 	uint16_t phy_page = 0;
   4117 	int rv = 0;
   4118 
   4119 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4120 		device_xname(sc->sc_dev), __func__));
   4121 
   4122 	switch (sc->sc_type) {
   4123 	case WM_T_ICH8:
   4124 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4125 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4126 			return 0;
   4127 
   4128 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4129 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4130 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4131 			break;
   4132 		}
   4133 		/* FALLTHROUGH */
   4134 	case WM_T_PCH:
   4135 	case WM_T_PCH2:
   4136 	case WM_T_PCH_LPT:
   4137 	case WM_T_PCH_SPT:
   4138 	case WM_T_PCH_CNP:
   4139 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4140 		break;
   4141 	default:
   4142 		return 0;
   4143 	}
   4144 
   4145 	if ((rv = sc->phy.acquire(sc)) != 0)
   4146 		return rv;
   4147 
   4148 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4149 	if ((reg & sw_cfg_mask) == 0)
   4150 		goto release;
   4151 
   4152 	/*
   4153 	 * Make sure HW does not configure LCD from PHY extended configuration
   4154 	 * before SW configuration
   4155 	 */
   4156 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4157 	if ((sc->sc_type < WM_T_PCH2)
   4158 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4159 		goto release;
   4160 
   4161 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4162 		device_xname(sc->sc_dev), __func__));
   4163 	/* word_addr is in DWORD */
   4164 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4165 
   4166 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4167 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4168 	if (cnf_size == 0)
   4169 		goto release;
   4170 
   4171 	if (((sc->sc_type == WM_T_PCH)
   4172 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4173 	    || (sc->sc_type > WM_T_PCH)) {
   4174 		/*
   4175 		 * HW configures the SMBus address and LEDs when the OEM and
   4176 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4177 		 * are cleared, SW will configure them instead.
   4178 		 */
   4179 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4180 			device_xname(sc->sc_dev), __func__));
   4181 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4182 			goto release;
   4183 
   4184 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4185 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4186 		    (uint16_t)reg);
   4187 		if (rv != 0)
   4188 			goto release;
   4189 	}
   4190 
   4191 	/* Configure LCD from extended configuration region. */
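         	/*
         	 * Each entry in the region is a pair of 16-bit NVM words:
         	 * the PHY register data first, then the register address.
         	 * Page select writes are remembered so later addresses can
         	 * be rebased onto the current page.
         	 */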
   4192 	for (i = 0; i < cnf_size; i++) {
   4193 		uint16_t reg_data, reg_addr;
   4194 
   4195 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4196 			goto release;
   4197 
   4198 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4199 			goto release;
   4200 
   4201 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4202 			phy_page = reg_data;
   4203 
   4204 		reg_addr &= IGPHY_MAXREGADDR;
   4205 		reg_addr |= phy_page;
   4206 
   4207 		KASSERT(sc->phy.writereg_locked != NULL);
   4208 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4209 		    reg_data);
   4210 	}
   4211 
   4212 release:
   4213 	sc->phy.release(sc);
   4214 	return rv;
   4215 }
   4216 
   4217 /*
   4218  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4219  *  @sc:       pointer to the HW structure
   4220  *  @d0_state: boolean if entering d0 or d3 device state
   4221  *
   4222  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4223  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4224  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4225  */
   4226 int
   4227 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4228 {
   4229 	uint32_t mac_reg;
   4230 	uint16_t oem_reg;
   4231 	int rv;
   4232 
   4233 	if (sc->sc_type < WM_T_PCH)
   4234 		return 0;
   4235 
   4236 	rv = sc->phy.acquire(sc);
   4237 	if (rv != 0)
   4238 		return rv;
   4239 
   4240 	if (sc->sc_type == WM_T_PCH) {
   4241 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4242 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4243 			goto release;
   4244 	}
   4245 
   4246 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4247 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4248 		goto release;
   4249 
   4250 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4251 
   4252 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4253 	if (rv != 0)
   4254 		goto release;
   4255 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4256 
   4257 	if (d0_state) {
   4258 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4259 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4260 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4261 			oem_reg |= HV_OEM_BITS_LPLU;
   4262 	} else {
   4263 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4264 		    != 0)
   4265 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4266 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4267 		    != 0)
   4268 			oem_reg |= HV_OEM_BITS_LPLU;
   4269 	}
   4270 
   4271 	/* Set Restart auto-neg to activate the bits */
   4272 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4273 	    && (wm_phy_resetisblocked(sc) == false))
   4274 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4275 
   4276 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4277 
   4278 release:
   4279 	sc->phy.release(sc);
   4280 
   4281 	return rv;
   4282 }
   4283 
   4284 /* Init hardware bits */
   4285 void
   4286 wm_initialize_hardware_bits(struct wm_softc *sc)
   4287 {
   4288 	uint32_t tarc0, tarc1, reg;
   4289 
   4290 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4291 		device_xname(sc->sc_dev), __func__));
   4292 
   4293 	/* For 82571 variant, 80003 and ICHs */
   4294 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4295 	    || (sc->sc_type >= WM_T_80003)) {
   4296 
   4297 		/* Transmit Descriptor Control 0 */
   4298 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4299 		reg |= TXDCTL_COUNT_DESC;
   4300 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4301 
   4302 		/* Transmit Descriptor Control 1 */
   4303 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4304 		reg |= TXDCTL_COUNT_DESC;
   4305 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4306 
   4307 		/* TARC0 */
   4308 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4309 		switch (sc->sc_type) {
   4310 		case WM_T_82571:
   4311 		case WM_T_82572:
   4312 		case WM_T_82573:
   4313 		case WM_T_82574:
   4314 		case WM_T_82583:
   4315 		case WM_T_80003:
   4316 			/* Clear bits 30..27 */
   4317 			tarc0 &= ~__BITS(30, 27);
   4318 			break;
   4319 		default:
   4320 			break;
   4321 		}
   4322 
   4323 		switch (sc->sc_type) {
   4324 		case WM_T_82571:
   4325 		case WM_T_82572:
   4326 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4327 
   4328 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4329 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4330 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4331 			/* 8257[12] Errata No.7 */
    4332 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4333 
   4334 			/* TARC1 bit 28 */
   4335 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4336 				tarc1 &= ~__BIT(28);
   4337 			else
   4338 				tarc1 |= __BIT(28);
   4339 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4340 
   4341 			/*
   4342 			 * 8257[12] Errata No.13
    4343 			 * Disable Dynamic Clock Gating.
   4344 			 */
   4345 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4346 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4347 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4348 			break;
   4349 		case WM_T_82573:
   4350 		case WM_T_82574:
   4351 		case WM_T_82583:
   4352 			if ((sc->sc_type == WM_T_82574)
   4353 			    || (sc->sc_type == WM_T_82583))
   4354 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4355 
   4356 			/* Extended Device Control */
   4357 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4358 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4359 			reg |= __BIT(22);	/* Set bit 22 */
   4360 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4361 
   4362 			/* Device Control */
   4363 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4364 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4365 
   4366 			/* PCIe Control Register */
   4367 			/*
   4368 			 * 82573 Errata (unknown).
   4369 			 *
   4370 			 * 82574 Errata 25 and 82583 Errata 12
   4371 			 * "Dropped Rx Packets":
    4372 			 *   NVM image version 2.1.4 and newer does not have this bug.
   4373 			 */
   4374 			reg = CSR_READ(sc, WMREG_GCR);
   4375 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4376 			CSR_WRITE(sc, WMREG_GCR, reg);
   4377 
   4378 			if ((sc->sc_type == WM_T_82574)
   4379 			    || (sc->sc_type == WM_T_82583)) {
   4380 				/*
   4381 				 * Document says this bit must be set for
   4382 				 * proper operation.
   4383 				 */
   4384 				reg = CSR_READ(sc, WMREG_GCR);
   4385 				reg |= __BIT(22);
   4386 				CSR_WRITE(sc, WMREG_GCR, reg);
   4387 
   4388 				/*
    4389 				 * Apply a workaround for a hardware erratum
    4390 				 * documented in the errata docs. It fixes an
    4391 				 * issue where error-prone or unreliable PCIe
    4392 				 * completions occur, particularly with ASPM
    4393 				 * enabled. Without the fix, the issue can
    4394 				 * cause Tx timeouts.
   4395 				 */
   4396 				reg = CSR_READ(sc, WMREG_GCR2);
   4397 				reg |= __BIT(0);
   4398 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4399 			}
   4400 			break;
   4401 		case WM_T_80003:
   4402 			/* TARC0 */
   4403 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4404 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4405 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4406 
   4407 			/* TARC1 bit 28 */
   4408 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4409 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4410 				tarc1 &= ~__BIT(28);
   4411 			else
   4412 				tarc1 |= __BIT(28);
   4413 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4414 			break;
   4415 		case WM_T_ICH8:
   4416 		case WM_T_ICH9:
   4417 		case WM_T_ICH10:
   4418 		case WM_T_PCH:
   4419 		case WM_T_PCH2:
   4420 		case WM_T_PCH_LPT:
   4421 		case WM_T_PCH_SPT:
   4422 		case WM_T_PCH_CNP:
   4423 			/* TARC0 */
   4424 			if (sc->sc_type == WM_T_ICH8) {
   4425 				/* Set TARC0 bits 29 and 28 */
   4426 				tarc0 |= __BITS(29, 28);
   4427 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4428 				tarc0 |= __BIT(29);
   4429 				/*
   4430 				 *  Drop bit 28. From Linux.
   4431 				 * See I218/I219 spec update
   4432 				 * "5. Buffer Overrun While the I219 is
   4433 				 * Processing DMA Transactions"
   4434 				 */
   4435 				tarc0 &= ~__BIT(28);
   4436 			}
   4437 			/* Set TARC0 bits 23,24,26,27 */
   4438 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4439 
   4440 			/* CTRL_EXT */
   4441 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4442 			reg |= __BIT(22);	/* Set bit 22 */
   4443 			/*
   4444 			 * Enable PHY low-power state when MAC is at D3
   4445 			 * w/o WoL
   4446 			 */
   4447 			if (sc->sc_type >= WM_T_PCH)
   4448 				reg |= CTRL_EXT_PHYPDEN;
   4449 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4450 
   4451 			/* TARC1 */
   4452 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4453 			/* bit 28 */
   4454 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4455 				tarc1 &= ~__BIT(28);
   4456 			else
   4457 				tarc1 |= __BIT(28);
   4458 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4459 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4460 
   4461 			/* Device Status */
   4462 			if (sc->sc_type == WM_T_ICH8) {
   4463 				reg = CSR_READ(sc, WMREG_STATUS);
   4464 				reg &= ~__BIT(31);
   4465 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4466 
   4467 			}
   4468 
   4469 			/* IOSFPC */
   4470 			if (sc->sc_type == WM_T_PCH_SPT) {
   4471 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4472 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4473 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4474 			}
   4475 			/*
    4476 			 * To work around a descriptor data corruption issue
    4477 			 * seen with NFS v2 UDP traffic, just disable the NFS
    4478 			 * filtering capability.
   4479 			 */
   4480 			reg = CSR_READ(sc, WMREG_RFCTL);
   4481 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4482 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4483 			break;
   4484 		default:
   4485 			break;
   4486 		}
   4487 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4488 
   4489 		switch (sc->sc_type) {
   4490 		/*
   4491 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4492 		 * Avoid RSS Hash Value bug.
   4493 		 */
   4494 		case WM_T_82571:
   4495 		case WM_T_82572:
   4496 		case WM_T_82573:
   4497 		case WM_T_80003:
   4498 		case WM_T_ICH8:
   4499 			reg = CSR_READ(sc, WMREG_RFCTL);
   4500 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4501 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4502 			break;
   4503 		case WM_T_82574:
    4504 			/* Use extended Rx descriptors. */
   4505 			reg = CSR_READ(sc, WMREG_RFCTL);
   4506 			reg |= WMREG_RFCTL_EXSTEN;
   4507 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4508 			break;
   4509 		default:
   4510 			break;
   4511 		}
   4512 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4513 		/*
   4514 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4515 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4516 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4517 		 * Correctly by the Device"
   4518 		 *
   4519 		 * I354(C2000) Errata AVR53:
   4520 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4521 		 * Hang"
   4522 		 */
   4523 		reg = CSR_READ(sc, WMREG_RFCTL);
   4524 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4525 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4526 	}
   4527 }
   4528 
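/*
 * wm_rxpbs_adjust_82580:
 *
 *	Map a raw RXPBS register value to a packet buffer size through
 *	wm_82580_rxpbs_table; out-of-range values yield 0.
 */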
   4529 static uint32_t
   4530 wm_rxpbs_adjust_82580(uint32_t val)
   4531 {
   4532 	uint32_t rv = 0;
   4533 
   4534 	if (val < __arraycount(wm_82580_rxpbs_table))
   4535 		rv = wm_82580_rxpbs_table[val];
   4536 
   4537 	return rv;
   4538 }
   4539 
   4540 /*
   4541  * wm_reset_phy:
   4542  *
    4543  *	Generic PHY reset function.
   4544  *	Same as e1000_phy_hw_reset_generic()
   4545  */
   4546 static int
   4547 wm_reset_phy(struct wm_softc *sc)
   4548 {
   4549 	uint32_t reg;
   4550 
   4551 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4552 		device_xname(sc->sc_dev), __func__));
   4553 	if (wm_phy_resetisblocked(sc))
   4554 		return -1;
   4555 
   4556 	sc->phy.acquire(sc);
   4557 
   4558 	reg = CSR_READ(sc, WMREG_CTRL);
   4559 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4560 	CSR_WRITE_FLUSH(sc);
   4561 
   4562 	delay(sc->phy.reset_delay_us);
   4563 
   4564 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4565 	CSR_WRITE_FLUSH(sc);
   4566 
   4567 	delay(150);
   4568 
   4569 	sc->phy.release(sc);
   4570 
   4571 	wm_get_cfg_done(sc);
   4572 	wm_phy_post_reset(sc);
   4573 
   4574 	return 0;
   4575 }
   4576 
   4577 /*
    4578  * Used only by WM_T_PCH_SPT, which does not use multiqueue,
    4579  * so it is enough to check sc->sc_queue[0].
   4580  */
   4581 static void
   4582 wm_flush_desc_rings(struct wm_softc *sc)
   4583 {
   4584 	pcireg_t preg;
   4585 	uint32_t reg;
   4586 	struct wm_txqueue *txq;
   4587 	wiseman_txdesc_t *txd;
   4588 	int nexttx;
   4589 	uint32_t rctl;
   4590 
   4591 	/* First, disable MULR fix in FEXTNVM11 */
   4592 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4593 	reg |= FEXTNVM11_DIS_MULRFIX;
   4594 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4595 
   4596 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4597 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4598 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4599 		return;
   4600 
   4601 	/* TX */
   4602 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4603 	    device_xname(sc->sc_dev), preg, reg);
   4604 	reg = CSR_READ(sc, WMREG_TCTL);
   4605 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4606 
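	/*
	 * Queue a single dummy 512-byte descriptor and advance the tail
	 * pointer so that the flush requested by the hardware (the
	 * DESCRING_STATUS_FLUSH_REQ check above) can complete.
	 */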
   4607 	txq = &sc->sc_queue[0].wmq_txq;
   4608 	nexttx = txq->txq_next;
   4609 	txd = &txq->txq_descs[nexttx];
   4610 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4611 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4612 	txd->wtx_fields.wtxu_status = 0;
   4613 	txd->wtx_fields.wtxu_options = 0;
   4614 	txd->wtx_fields.wtxu_vlan = 0;
   4615 
   4616 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4617 	    BUS_SPACE_BARRIER_WRITE);
   4618 
   4619 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4620 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4621 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4622 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4623 	delay(250);
   4624 
   4625 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4626 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4627 		return;
   4628 
   4629 	/* RX */
   4630 	printf("%s: Need RX flush (reg = %08x)\n",
   4631 	    device_xname(sc->sc_dev), preg);
   4632 	rctl = CSR_READ(sc, WMREG_RCTL);
   4633 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4634 	CSR_WRITE_FLUSH(sc);
   4635 	delay(150);
   4636 
   4637 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4638 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4639 	reg &= 0xffffc000;
   4640 	/*
    4641 	 * Update the thresholds: set the prefetch threshold to 31 and the
    4642 	 * host threshold to 1, and make sure the granularity is
    4643 	 * "descriptors", not "cache lines".
   4644 	 */
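	/* 0x1f is a prefetch threshold of 31; 1 << 8 is a host threshold of 1. */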
   4645 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4646 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4647 
   4648 	/*
   4649 	 * momentarily enable the RX ring for the changes to take
   4650 	 * effect
   4651 	 */
   4652 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4653 	CSR_WRITE_FLUSH(sc);
   4654 	delay(150);
   4655 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4656 }
   4657 
   4658 /*
   4659  * wm_reset:
   4660  *
    4661  *	Reset the chip to a known state.
   4662  */
   4663 static void
   4664 wm_reset(struct wm_softc *sc)
   4665 {
   4666 	int phy_reset = 0;
   4667 	int i, error = 0;
   4668 	uint32_t reg;
   4669 	uint16_t kmreg;
   4670 	int rv;
   4671 
   4672 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4673 		device_xname(sc->sc_dev), __func__));
   4674 	KASSERT(sc->sc_type != 0);
   4675 
   4676 	/*
   4677 	 * Allocate on-chip memory according to the MTU size.
   4678 	 * The Packet Buffer Allocation register must be written
   4679 	 * before the chip is reset.
   4680 	 */
   4681 	switch (sc->sc_type) {
   4682 	case WM_T_82547:
   4683 	case WM_T_82547_2:
   4684 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4685 		    PBA_22K : PBA_30K;
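		/*
		 * Example: with the default MTU, sc_pba = PBA_30K, leaving
		 * PBA_40K - PBA_30K = 10KB of the packet buffer for the
		 * Tx FIFO set up below.
		 */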
   4686 		for (i = 0; i < sc->sc_nqueues; i++) {
   4687 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4688 			txq->txq_fifo_head = 0;
   4689 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4690 			txq->txq_fifo_size =
   4691 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4692 			txq->txq_fifo_stall = 0;
   4693 		}
   4694 		break;
   4695 	case WM_T_82571:
   4696 	case WM_T_82572:
   4697 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4698 	case WM_T_80003:
   4699 		sc->sc_pba = PBA_32K;
   4700 		break;
   4701 	case WM_T_82573:
   4702 		sc->sc_pba = PBA_12K;
   4703 		break;
   4704 	case WM_T_82574:
   4705 	case WM_T_82583:
   4706 		sc->sc_pba = PBA_20K;
   4707 		break;
   4708 	case WM_T_82576:
   4709 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4710 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4711 		break;
   4712 	case WM_T_82580:
   4713 	case WM_T_I350:
   4714 	case WM_T_I354:
   4715 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4716 		break;
   4717 	case WM_T_I210:
   4718 	case WM_T_I211:
   4719 		sc->sc_pba = PBA_34K;
   4720 		break;
   4721 	case WM_T_ICH8:
   4722 		/* Workaround for a bit corruption issue in FIFO memory */
   4723 		sc->sc_pba = PBA_8K;
   4724 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4725 		break;
   4726 	case WM_T_ICH9:
   4727 	case WM_T_ICH10:
   4728 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4729 		    PBA_14K : PBA_10K;
   4730 		break;
   4731 	case WM_T_PCH:
   4732 	case WM_T_PCH2:	/* XXX 14K? */
   4733 	case WM_T_PCH_LPT:
   4734 	case WM_T_PCH_SPT:
   4735 	case WM_T_PCH_CNP:
   4736 		sc->sc_pba = PBA_26K;
   4737 		break;
   4738 	default:
   4739 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4740 		    PBA_40K : PBA_48K;
   4741 		break;
   4742 	}
   4743 	/*
   4744 	 * Only old or non-multiqueue devices have the PBA register
   4745 	 * XXX Need special handling for 82575.
   4746 	 */
   4747 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4748 	    || (sc->sc_type == WM_T_82575))
   4749 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4750 
   4751 	/* Prevent the PCI-E bus from sticking */
   4752 	if (sc->sc_flags & WM_F_PCIE) {
   4753 		int timeout = 800;
   4754 
   4755 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4756 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4757 
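		/* Poll up to 800 * 100us = 80ms for STATUS_GIO_M_ENA to clear. */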
   4758 		while (timeout--) {
   4759 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4760 			    == 0)
   4761 				break;
   4762 			delay(100);
   4763 		}
   4764 		if (timeout == 0)
   4765 			device_printf(sc->sc_dev,
   4766 			    "failed to disable busmastering\n");
   4767 	}
   4768 
   4769 	/* Set the completion timeout for interface */
   4770 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4771 	    || (sc->sc_type == WM_T_82580)
   4772 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4773 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4774 		wm_set_pcie_completion_timeout(sc);
   4775 
   4776 	/* Clear interrupt */
   4777 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4778 	if (wm_is_using_msix(sc)) {
   4779 		if (sc->sc_type != WM_T_82574) {
   4780 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4781 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4782 		} else
   4783 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4784 	}
   4785 
   4786 	/* Stop the transmit and receive processes. */
   4787 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4788 	sc->sc_rctl &= ~RCTL_EN;
   4789 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4790 	CSR_WRITE_FLUSH(sc);
   4791 
   4792 	/* XXX set_tbi_sbp_82543() */
   4793 
   4794 	delay(10*1000);
   4795 
   4796 	/* Must acquire the MDIO ownership before MAC reset */
   4797 	switch (sc->sc_type) {
   4798 	case WM_T_82573:
   4799 	case WM_T_82574:
   4800 	case WM_T_82583:
   4801 		error = wm_get_hw_semaphore_82573(sc);
   4802 		break;
   4803 	default:
   4804 		break;
   4805 	}
   4806 
   4807 	/*
   4808 	 * 82541 Errata 29? & 82547 Errata 28?
   4809 	 * See also the description about PHY_RST bit in CTRL register
   4810 	 * in 8254x_GBe_SDM.pdf.
   4811 	 */
   4812 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4813 		CSR_WRITE(sc, WMREG_CTRL,
   4814 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4815 		CSR_WRITE_FLUSH(sc);
   4816 		delay(5000);
   4817 	}
   4818 
   4819 	switch (sc->sc_type) {
   4820 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4821 	case WM_T_82541:
   4822 	case WM_T_82541_2:
   4823 	case WM_T_82547:
   4824 	case WM_T_82547_2:
   4825 		/*
   4826 		 * On some chipsets, a reset through a memory-mapped write
   4827 		 * cycle can cause the chip to reset before completing the
    4828 		 * write cycle. This causes a major headache that can be avoided
   4829 		 * by issuing the reset via indirect register writes through
   4830 		 * I/O space.
   4831 		 *
   4832 		 * So, if we successfully mapped the I/O BAR at attach time,
   4833 		 * use that. Otherwise, try our luck with a memory-mapped
   4834 		 * reset.
   4835 		 */
   4836 		if (sc->sc_flags & WM_F_IOH_VALID)
   4837 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4838 		else
   4839 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4840 		break;
   4841 	case WM_T_82545_3:
   4842 	case WM_T_82546_3:
   4843 		/* Use the shadow control register on these chips. */
   4844 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4845 		break;
   4846 	case WM_T_80003:
   4847 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4848 		sc->phy.acquire(sc);
   4849 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4850 		sc->phy.release(sc);
   4851 		break;
   4852 	case WM_T_ICH8:
   4853 	case WM_T_ICH9:
   4854 	case WM_T_ICH10:
   4855 	case WM_T_PCH:
   4856 	case WM_T_PCH2:
   4857 	case WM_T_PCH_LPT:
   4858 	case WM_T_PCH_SPT:
   4859 	case WM_T_PCH_CNP:
   4860 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4861 		if (wm_phy_resetisblocked(sc) == false) {
   4862 			/*
   4863 			 * Gate automatic PHY configuration by hardware on
   4864 			 * non-managed 82579
   4865 			 */
   4866 			if ((sc->sc_type == WM_T_PCH2)
   4867 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4868 				== 0))
   4869 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4870 
   4871 			reg |= CTRL_PHY_RESET;
   4872 			phy_reset = 1;
   4873 		} else
   4874 			printf("XXX reset is blocked!!!\n");
   4875 		sc->phy.acquire(sc);
   4876 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4877 		/* Don't insert a completion barrier while resetting */
   4878 		delay(20*1000);
   4879 		mutex_exit(sc->sc_ich_phymtx);
   4880 		break;
   4881 	case WM_T_82580:
   4882 	case WM_T_I350:
   4883 	case WM_T_I354:
   4884 	case WM_T_I210:
   4885 	case WM_T_I211:
   4886 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4887 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4888 			CSR_WRITE_FLUSH(sc);
   4889 		delay(5000);
   4890 		break;
   4891 	case WM_T_82542_2_0:
   4892 	case WM_T_82542_2_1:
   4893 	case WM_T_82543:
   4894 	case WM_T_82540:
   4895 	case WM_T_82545:
   4896 	case WM_T_82546:
   4897 	case WM_T_82571:
   4898 	case WM_T_82572:
   4899 	case WM_T_82573:
   4900 	case WM_T_82574:
   4901 	case WM_T_82575:
   4902 	case WM_T_82576:
   4903 	case WM_T_82583:
   4904 	default:
   4905 		/* Everything else can safely use the documented method. */
   4906 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4907 		break;
   4908 	}
   4909 
   4910 	/* Must release the MDIO ownership after MAC reset */
   4911 	switch (sc->sc_type) {
   4912 	case WM_T_82573:
   4913 	case WM_T_82574:
   4914 	case WM_T_82583:
   4915 		if (error == 0)
   4916 			wm_put_hw_semaphore_82573(sc);
   4917 		break;
   4918 	default:
   4919 		break;
   4920 	}
   4921 
   4922 	/* Set Phy Config Counter to 50msec */
   4923 	if (sc->sc_type == WM_T_PCH2) {
   4924 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4925 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4926 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4927 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4928 	}
   4929 
   4930 	if (phy_reset != 0)
   4931 		wm_get_cfg_done(sc);
   4932 
   4933 	/* reload EEPROM */
   4934 	switch (sc->sc_type) {
   4935 	case WM_T_82542_2_0:
   4936 	case WM_T_82542_2_1:
   4937 	case WM_T_82543:
   4938 	case WM_T_82544:
   4939 		delay(10);
   4940 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4941 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4942 		CSR_WRITE_FLUSH(sc);
   4943 		delay(2000);
   4944 		break;
   4945 	case WM_T_82540:
   4946 	case WM_T_82545:
   4947 	case WM_T_82545_3:
   4948 	case WM_T_82546:
   4949 	case WM_T_82546_3:
   4950 		delay(5*1000);
   4951 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4952 		break;
   4953 	case WM_T_82541:
   4954 	case WM_T_82541_2:
   4955 	case WM_T_82547:
   4956 	case WM_T_82547_2:
   4957 		delay(20000);
   4958 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4959 		break;
   4960 	case WM_T_82571:
   4961 	case WM_T_82572:
   4962 	case WM_T_82573:
   4963 	case WM_T_82574:
   4964 	case WM_T_82583:
   4965 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4966 			delay(10);
   4967 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4968 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4969 			CSR_WRITE_FLUSH(sc);
   4970 		}
   4971 		/* check EECD_EE_AUTORD */
   4972 		wm_get_auto_rd_done(sc);
   4973 		/*
   4974 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4975 		 * is set.
   4976 		 */
   4977 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4978 		    || (sc->sc_type == WM_T_82583))
   4979 			delay(25*1000);
   4980 		break;
   4981 	case WM_T_82575:
   4982 	case WM_T_82576:
   4983 	case WM_T_82580:
   4984 	case WM_T_I350:
   4985 	case WM_T_I354:
   4986 	case WM_T_I210:
   4987 	case WM_T_I211:
   4988 	case WM_T_80003:
   4989 		/* check EECD_EE_AUTORD */
   4990 		wm_get_auto_rd_done(sc);
   4991 		break;
   4992 	case WM_T_ICH8:
   4993 	case WM_T_ICH9:
   4994 	case WM_T_ICH10:
   4995 	case WM_T_PCH:
   4996 	case WM_T_PCH2:
   4997 	case WM_T_PCH_LPT:
   4998 	case WM_T_PCH_SPT:
   4999 	case WM_T_PCH_CNP:
   5000 		break;
   5001 	default:
   5002 		panic("%s: unknown type\n", __func__);
   5003 	}
   5004 
   5005 	/* Check whether EEPROM is present or not */
   5006 	switch (sc->sc_type) {
   5007 	case WM_T_82575:
   5008 	case WM_T_82576:
   5009 	case WM_T_82580:
   5010 	case WM_T_I350:
   5011 	case WM_T_I354:
   5012 	case WM_T_ICH8:
   5013 	case WM_T_ICH9:
   5014 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5015 			/* Not found */
   5016 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5017 			if (sc->sc_type == WM_T_82575)
   5018 				wm_reset_init_script_82575(sc);
   5019 		}
   5020 		break;
   5021 	default:
   5022 		break;
   5023 	}
   5024 
   5025 	if (phy_reset != 0)
   5026 		wm_phy_post_reset(sc);
   5027 
   5028 	if ((sc->sc_type == WM_T_82580)
   5029 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5030 		/* clear global device reset status bit */
   5031 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5032 	}
   5033 
   5034 	/* Clear any pending interrupt events. */
   5035 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5036 	reg = CSR_READ(sc, WMREG_ICR);
   5037 	if (wm_is_using_msix(sc)) {
   5038 		if (sc->sc_type != WM_T_82574) {
   5039 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5040 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5041 		} else
   5042 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5043 	}
   5044 
   5045 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5046 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5047 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5048 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5049 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5050 		reg |= KABGTXD_BGSQLBIAS;
   5051 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5052 	}
   5053 
   5054 	/* reload sc_ctrl */
   5055 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5056 
   5057 	wm_set_eee(sc);
   5058 
   5059 	/*
   5060 	 * For PCH, this write will make sure that any noise will be detected
   5061 	 * as a CRC error and be dropped rather than show up as a bad packet
    5062 	 * to the DMA engine.
   5063 	 */
   5064 	if (sc->sc_type == WM_T_PCH)
   5065 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5066 
   5067 	if (sc->sc_type >= WM_T_82544)
   5068 		CSR_WRITE(sc, WMREG_WUC, 0);
   5069 
   5070 	if (sc->sc_type < WM_T_82575)
   5071 		wm_disable_aspm(sc); /* Workaround for some chips */
   5072 
   5073 	wm_reset_mdicnfg_82580(sc);
   5074 
   5075 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5076 		wm_pll_workaround_i210(sc);
   5077 
   5078 	if (sc->sc_type == WM_T_80003) {
   5079 		/* default to TRUE to enable the MDIC W/A */
   5080 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5081 
   5082 		rv = wm_kmrn_readreg(sc,
   5083 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5084 		if (rv == 0) {
   5085 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5086 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5087 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5088 			else
   5089 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5090 		}
   5091 	}
   5092 }
   5093 
   5094 /*
   5095  * wm_add_rxbuf:
   5096  *
    5097  *	Add a receive buffer to the indicated descriptor.
   5098  */
   5099 static int
   5100 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5101 {
   5102 	struct wm_softc *sc = rxq->rxq_sc;
   5103 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5104 	struct mbuf *m;
   5105 	int error;
   5106 
   5107 	KASSERT(mutex_owned(rxq->rxq_lock));
   5108 
   5109 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5110 	if (m == NULL)
   5111 		return ENOBUFS;
   5112 
   5113 	MCLGET(m, M_DONTWAIT);
   5114 	if ((m->m_flags & M_EXT) == 0) {
   5115 		m_freem(m);
   5116 		return ENOBUFS;
   5117 	}
   5118 
   5119 	if (rxs->rxs_mbuf != NULL)
   5120 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5121 
   5122 	rxs->rxs_mbuf = m;
   5123 
   5124 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5125 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5126 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5127 	if (error) {
   5128 		/* XXX XXX XXX */
   5129 		aprint_error_dev(sc->sc_dev,
   5130 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5131 		panic("wm_add_rxbuf");
   5132 	}
   5133 
   5134 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5135 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5136 
   5137 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5138 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5139 			wm_init_rxdesc(rxq, idx);
   5140 	} else
   5141 		wm_init_rxdesc(rxq, idx);
   5142 
   5143 	return 0;
   5144 }
   5145 
   5146 /*
   5147  * wm_rxdrain:
   5148  *
   5149  *	Drain the receive queue.
   5150  */
   5151 static void
   5152 wm_rxdrain(struct wm_rxqueue *rxq)
   5153 {
   5154 	struct wm_softc *sc = rxq->rxq_sc;
   5155 	struct wm_rxsoft *rxs;
   5156 	int i;
   5157 
   5158 	KASSERT(mutex_owned(rxq->rxq_lock));
   5159 
   5160 	for (i = 0; i < WM_NRXDESC; i++) {
   5161 		rxs = &rxq->rxq_soft[i];
   5162 		if (rxs->rxs_mbuf != NULL) {
   5163 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5164 			m_freem(rxs->rxs_mbuf);
   5165 			rxs->rxs_mbuf = NULL;
   5166 		}
   5167 	}
   5168 }
   5169 
   5170 /*
   5171  * Setup registers for RSS.
   5172  *
   5173  * XXX not yet VMDq support
   5174  */
   5175 static void
   5176 wm_init_rss(struct wm_softc *sc)
   5177 {
   5178 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5179 	int i;
   5180 
   5181 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5182 
   5183 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5184 		int qid, reta_ent;
   5185 
   5186 		qid  = i % sc->sc_nqueues;
   5187 		switch (sc->sc_type) {
   5188 		case WM_T_82574:
   5189 			reta_ent = __SHIFTIN(qid,
   5190 			    RETA_ENT_QINDEX_MASK_82574);
   5191 			break;
   5192 		case WM_T_82575:
   5193 			reta_ent = __SHIFTIN(qid,
   5194 			    RETA_ENT_QINDEX1_MASK_82575);
   5195 			break;
   5196 		default:
   5197 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5198 			break;
   5199 		}
   5200 
   5201 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5202 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5203 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5204 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5205 	}
   5206 
   5207 	rss_getkey((uint8_t *)rss_key);
   5208 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5209 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5210 
   5211 	if (sc->sc_type == WM_T_82574)
   5212 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5213 	else
   5214 		mrqc = MRQC_ENABLE_RSS_MQ;
   5215 
   5216 	/*
   5217 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5218 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5219 	 */
   5220 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5221 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5222 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5223 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5224 
   5225 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5226 }
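
/*
 * Illustrative sketch (not compiled): how the hardware is expected to use
 * the tables programmed in wm_init_rss().  The NIC computes a 32-bit
 * Toeplitz hash over the packet using the RSSRK key; the low bits of the
 * hash index the redirection table, whose entry selects the Rx queue.
 * The function name below is hypothetical and exists only for exposition.
 */
#if 0
static unsigned int
rss_example_pick_queue(uint32_t toeplitz_hash,
    const uint8_t reta[RETA_NUM_ENTRIES])
{

	/* The low bits of the hash select a redirection table entry. */
	return reta[toeplitz_hash % RETA_NUM_ENTRIES];
}
#endif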
   5227 
   5228 /*
    5229  * Adjust the TX and RX queue numbers which the system actually uses.
    5230  *
    5231  * The numbers are affected by the following parameters:
    5232  *     - The number of hardware queues
   5233  *     - The number of MSI-X vectors (= "nvectors" argument)
   5234  *     - ncpu
   5235  */
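/*
 * Worked example: an 82576 (16 hardware Tx/Rx queues) probed with
 * nvectors = 5 on an 8-CPU machine gives hw_nqueues = 16; this is
 * capped to nvectors - 1 = 4, and ncpu = 8 does not lower it further,
 * so sc_nqueues becomes 4.
 */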
   5236 static void
   5237 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5238 {
   5239 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5240 
   5241 	if (nvectors < 2) {
   5242 		sc->sc_nqueues = 1;
   5243 		return;
   5244 	}
   5245 
   5246 	switch (sc->sc_type) {
   5247 	case WM_T_82572:
   5248 		hw_ntxqueues = 2;
   5249 		hw_nrxqueues = 2;
   5250 		break;
   5251 	case WM_T_82574:
   5252 		hw_ntxqueues = 2;
   5253 		hw_nrxqueues = 2;
   5254 		break;
   5255 	case WM_T_82575:
   5256 		hw_ntxqueues = 4;
   5257 		hw_nrxqueues = 4;
   5258 		break;
   5259 	case WM_T_82576:
   5260 		hw_ntxqueues = 16;
   5261 		hw_nrxqueues = 16;
   5262 		break;
   5263 	case WM_T_82580:
   5264 	case WM_T_I350:
   5265 	case WM_T_I354:
   5266 		hw_ntxqueues = 8;
   5267 		hw_nrxqueues = 8;
   5268 		break;
   5269 	case WM_T_I210:
   5270 		hw_ntxqueues = 4;
   5271 		hw_nrxqueues = 4;
   5272 		break;
   5273 	case WM_T_I211:
   5274 		hw_ntxqueues = 2;
   5275 		hw_nrxqueues = 2;
   5276 		break;
   5277 		/*
    5278 		 * As the following Ethernet controllers do not support
    5279 		 * MSI-X, this driver does not use multiqueue on them.
   5280 		 *     - WM_T_80003
   5281 		 *     - WM_T_ICH8
   5282 		 *     - WM_T_ICH9
   5283 		 *     - WM_T_ICH10
   5284 		 *     - WM_T_PCH
   5285 		 *     - WM_T_PCH2
   5286 		 *     - WM_T_PCH_LPT
   5287 		 */
   5288 	default:
   5289 		hw_ntxqueues = 1;
   5290 		hw_nrxqueues = 1;
   5291 		break;
   5292 	}
   5293 
   5294 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5295 
   5296 	/*
    5297 	 * As using more queues than MSI-X vectors cannot improve scaling,
    5298 	 * we limit the number of queues actually used.
   5299 	 */
   5300 	if (nvectors < hw_nqueues + 1)
   5301 		sc->sc_nqueues = nvectors - 1;
   5302 	else
   5303 		sc->sc_nqueues = hw_nqueues;
   5304 
   5305 	/*
    5306 	 * As using more queues than CPUs cannot improve scaling, we limit
    5307 	 * the number of queues actually used.
   5308 	 */
   5309 	if (ncpu < sc->sc_nqueues)
   5310 		sc->sc_nqueues = ncpu;
   5311 }
   5312 
   5313 static inline bool
   5314 wm_is_using_msix(struct wm_softc *sc)
   5315 {
   5316 
   5317 	return (sc->sc_nintrs > 1);
   5318 }
   5319 
   5320 static inline bool
   5321 wm_is_using_multiqueue(struct wm_softc *sc)
   5322 {
   5323 
   5324 	return (sc->sc_nqueues > 1);
   5325 }
   5326 
   5327 static int
   5328 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5329 {
   5330 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5331 	wmq->wmq_id = qidx;
   5332 	wmq->wmq_intr_idx = intr_idx;
   5333 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5334 #ifdef WM_MPSAFE
   5335 	    | SOFTINT_MPSAFE
   5336 #endif
   5337 	    , wm_handle_queue, wmq);
   5338 	if (wmq->wmq_si != NULL)
   5339 		return 0;
   5340 
   5341 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5342 	    wmq->wmq_id);
   5343 
   5344 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5345 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5346 	return ENOMEM;
   5347 }
   5348 
   5349 /*
   5350  * Both single interrupt MSI and INTx can use this function.
   5351  */
   5352 static int
   5353 wm_setup_legacy(struct wm_softc *sc)
   5354 {
   5355 	pci_chipset_tag_t pc = sc->sc_pc;
   5356 	const char *intrstr = NULL;
   5357 	char intrbuf[PCI_INTRSTR_LEN];
   5358 	int error;
   5359 
   5360 	error = wm_alloc_txrx_queues(sc);
   5361 	if (error) {
   5362 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5363 		    error);
   5364 		return ENOMEM;
   5365 	}
   5366 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5367 	    sizeof(intrbuf));
   5368 #ifdef WM_MPSAFE
   5369 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5370 #endif
   5371 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5372 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5373 	if (sc->sc_ihs[0] == NULL) {
   5374 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5375 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5376 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5377 		return ENOMEM;
   5378 	}
   5379 
   5380 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5381 	sc->sc_nintrs = 1;
   5382 
   5383 	return wm_softint_establish(sc, 0, 0);
   5384 }
   5385 
   5386 static int
   5387 wm_setup_msix(struct wm_softc *sc)
   5388 {
   5389 	void *vih;
   5390 	kcpuset_t *affinity;
   5391 	int qidx, error, intr_idx, txrx_established;
   5392 	pci_chipset_tag_t pc = sc->sc_pc;
   5393 	const char *intrstr = NULL;
   5394 	char intrbuf[PCI_INTRSTR_LEN];
   5395 	char intr_xname[INTRDEVNAMEBUF];
   5396 
   5397 	if (sc->sc_nqueues < ncpu) {
   5398 		/*
   5399 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5400 		 * interrupts starts from CPU#1.
   5401 		 */
   5402 		sc->sc_affinity_offset = 1;
   5403 	} else {
   5404 		/*
   5405 		 * In this case, this device use all CPUs. So, we unify
    5406 		 * In this case, this device uses all CPUs. So, we match the
    5407 		 * affinity cpu_index to the MSI-X vector number for readability.
   5408 		sc->sc_affinity_offset = 0;
   5409 	}
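
	/*
	 * Example: with 4 queues on an 8-CPU machine, sc_affinity_offset
	 * is 1, so the per-queue interrupts below are bound to
	 * CPU#1..CPU#4 while the LINK interrupt keeps its default
	 * affinity.
	 */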
   5410 
   5411 	error = wm_alloc_txrx_queues(sc);
   5412 	if (error) {
   5413 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5414 		    error);
   5415 		return ENOMEM;
   5416 	}
   5417 
   5418 	kcpuset_create(&affinity, false);
   5419 	intr_idx = 0;
   5420 
   5421 	/*
   5422 	 * TX and RX
   5423 	 */
   5424 	txrx_established = 0;
   5425 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5426 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5427 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5428 
   5429 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5430 		    sizeof(intrbuf));
   5431 #ifdef WM_MPSAFE
   5432 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5433 		    PCI_INTR_MPSAFE, true);
   5434 #endif
   5435 		memset(intr_xname, 0, sizeof(intr_xname));
   5436 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5437 		    device_xname(sc->sc_dev), qidx);
   5438 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5439 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5440 		if (vih == NULL) {
   5441 			aprint_error_dev(sc->sc_dev,
   5442 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5443 			    intrstr ? " at " : "",
   5444 			    intrstr ? intrstr : "");
   5445 
   5446 			goto fail;
   5447 		}
   5448 		kcpuset_zero(affinity);
   5449 		/* Round-robin affinity */
   5450 		kcpuset_set(affinity, affinity_to);
   5451 		error = interrupt_distribute(vih, affinity, NULL);
   5452 		if (error == 0) {
   5453 			aprint_normal_dev(sc->sc_dev,
   5454 			    "for TX and RX interrupting at %s affinity to %u\n",
   5455 			    intrstr, affinity_to);
   5456 		} else {
   5457 			aprint_normal_dev(sc->sc_dev,
   5458 			    "for TX and RX interrupting at %s\n", intrstr);
   5459 		}
   5460 		sc->sc_ihs[intr_idx] = vih;
   5461 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5462 			goto fail;
   5463 		txrx_established++;
   5464 		intr_idx++;
   5465 	}
   5466 
   5467 	/*
   5468 	 * LINK
   5469 	 */
   5470 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5471 	    sizeof(intrbuf));
   5472 #ifdef WM_MPSAFE
   5473 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5474 #endif
   5475 	memset(intr_xname, 0, sizeof(intr_xname));
   5476 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5477 	    device_xname(sc->sc_dev));
   5478 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5479 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5480 	if (vih == NULL) {
   5481 		aprint_error_dev(sc->sc_dev,
   5482 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5483 		    intrstr ? " at " : "",
   5484 		    intrstr ? intrstr : "");
   5485 
   5486 		goto fail;
   5487 	}
    5488 	/* Keep the default affinity for the LINK interrupt */
   5489 	aprint_normal_dev(sc->sc_dev,
   5490 	    "for LINK interrupting at %s\n", intrstr);
   5491 	sc->sc_ihs[intr_idx] = vih;
   5492 	sc->sc_link_intr_idx = intr_idx;
   5493 
   5494 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5495 	kcpuset_destroy(affinity);
   5496 	return 0;
   5497 
   5498  fail:
   5499 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5500 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5501 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5502 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5503 	}
   5504 
   5505 	kcpuset_destroy(affinity);
   5506 	return ENOMEM;
   5507 }
   5508 
   5509 static void
   5510 wm_unset_stopping_flags(struct wm_softc *sc)
   5511 {
   5512 	int i;
   5513 
   5514 	KASSERT(WM_CORE_LOCKED(sc));
   5515 
   5516 	/*
    5517 	 * The stopping flags must be unset in ascending order.
   5518 	 */
   5519 	for (i = 0; i < sc->sc_nqueues; i++) {
   5520 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5521 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5522 
   5523 		mutex_enter(txq->txq_lock);
   5524 		txq->txq_stopping = false;
   5525 		mutex_exit(txq->txq_lock);
   5526 
   5527 		mutex_enter(rxq->rxq_lock);
   5528 		rxq->rxq_stopping = false;
   5529 		mutex_exit(rxq->rxq_lock);
   5530 	}
   5531 
   5532 	sc->sc_core_stopping = false;
   5533 }
   5534 
   5535 static void
   5536 wm_set_stopping_flags(struct wm_softc *sc)
   5537 {
   5538 	int i;
   5539 
   5540 	KASSERT(WM_CORE_LOCKED(sc));
   5541 
   5542 	sc->sc_core_stopping = true;
   5543 
   5544 	/*
    5545 	 * The stopping flags must be set in ascending order.
   5546 	 */
   5547 	for (i = 0; i < sc->sc_nqueues; i++) {
   5548 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5549 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5550 
   5551 		mutex_enter(rxq->rxq_lock);
   5552 		rxq->rxq_stopping = true;
   5553 		mutex_exit(rxq->rxq_lock);
   5554 
   5555 		mutex_enter(txq->txq_lock);
   5556 		txq->txq_stopping = true;
   5557 		mutex_exit(txq->txq_lock);
   5558 	}
   5559 }
   5560 
   5561 /*
    5562  * Write the interrupt interval value to the ITR or EITR register.
   5563  */
   5564 static void
   5565 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5566 {
   5567 
   5568 	if (!wmq->wmq_set_itr)
   5569 		return;
   5570 
   5571 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5572 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5573 
   5574 		/*
    5575 		 * The 82575 doesn't have the CNT_INGR field,
    5576 		 * so overwrite the counter field in software.
   5577 		 */
   5578 		if (sc->sc_type == WM_T_82575)
   5579 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5580 		else
   5581 			eitr |= EITR_CNT_INGR;
   5582 
   5583 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5584 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5585 		/*
    5586 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5587 		 * the multiqueue function with MSI-X.
   5588 		 */
   5589 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5590 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5591 	} else {
   5592 		KASSERT(wmq->wmq_id == 0);
   5593 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5594 	}
   5595 
   5596 	wmq->wmq_set_itr = false;
   5597 }
   5598 
   5599 /*
   5600  * TODO
    5601  * The dynamic calculation of itr below is almost the same as Linux igb's;
    5602  * however, it does not fit wm(4), so AIM stays disabled until we find
    5603  * an appropriate itr calculation.
   5604  */
   5605 /*
    5606  * Calculate the interrupt interval value to be written to the register
    5607  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5608  */
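/*
 * Worked example for the disabled path below: an average packet size of
 * 800 bytes becomes 824 after the CRC/preamble/gap adjustment and falls
 * into the mid-size (300..1200) band, so new_itr = 824 / 3 = 274; it is
 * then multiplied by 4 on everything except the 82575, giving 1096.
 */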
   5609 static void
   5610 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5611 {
   5612 #ifdef NOTYET
   5613 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5614 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5615 	uint32_t avg_size = 0;
   5616 	uint32_t new_itr;
   5617 
   5618 	if (rxq->rxq_packets)
   5619 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5620 	if (txq->txq_packets)
   5621 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5622 
   5623 	if (avg_size == 0) {
   5624 		new_itr = 450; /* restore default value */
   5625 		goto out;
   5626 	}
   5627 
   5628 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5629 	avg_size += 24;
   5630 
   5631 	/* Don't starve jumbo frames */
   5632 	avg_size = uimin(avg_size, 3000);
   5633 
   5634 	/* Give a little boost to mid-size frames */
   5635 	if ((avg_size > 300) && (avg_size < 1200))
   5636 		new_itr = avg_size / 3;
   5637 	else
   5638 		new_itr = avg_size / 2;
   5639 
   5640 out:
   5641 	/*
    5642 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5643 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5644 	 */
   5645 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5646 		new_itr *= 4;
   5647 
   5648 	if (new_itr != wmq->wmq_itr) {
   5649 		wmq->wmq_itr = new_itr;
   5650 		wmq->wmq_set_itr = true;
   5651 	} else
   5652 		wmq->wmq_set_itr = false;
   5653 
   5654 	rxq->rxq_packets = 0;
   5655 	rxq->rxq_bytes = 0;
   5656 	txq->txq_packets = 0;
   5657 	txq->txq_bytes = 0;
   5658 #endif
   5659 }
   5660 
   5661 /*
   5662  * wm_init:		[ifnet interface function]
   5663  *
   5664  *	Initialize the interface.
   5665  */
   5666 static int
   5667 wm_init(struct ifnet *ifp)
   5668 {
   5669 	struct wm_softc *sc = ifp->if_softc;
   5670 	int ret;
   5671 
   5672 	WM_CORE_LOCK(sc);
   5673 	ret = wm_init_locked(ifp);
   5674 	WM_CORE_UNLOCK(sc);
   5675 
   5676 	return ret;
   5677 }
   5678 
   5679 static int
   5680 wm_init_locked(struct ifnet *ifp)
   5681 {
   5682 	struct wm_softc *sc = ifp->if_softc;
   5683 	struct ethercom *ec = &sc->sc_ethercom;
   5684 	int i, j, trynum, error = 0;
   5685 	uint32_t reg;
   5686 
   5687 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5688 		device_xname(sc->sc_dev), __func__));
   5689 	KASSERT(WM_CORE_LOCKED(sc));
   5690 
   5691 	/*
    5692 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5693 	 * There is a small but measurable benefit to avoiding the adjustment
   5694 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5695 	 * on such platforms.  One possibility is that the DMA itself is
   5696 	 * slightly more efficient if the front of the entire packet (instead
   5697 	 * of the front of the headers) is aligned.
   5698 	 *
   5699 	 * Note we must always set align_tweak to 0 if we are using
   5700 	 * jumbo frames.
   5701 	 */
   5702 #ifdef __NO_STRICT_ALIGNMENT
   5703 	sc->sc_align_tweak = 0;
   5704 #else
   5705 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5706 		sc->sc_align_tweak = 0;
   5707 	else
   5708 		sc->sc_align_tweak = 2;
   5709 #endif /* __NO_STRICT_ALIGNMENT */
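
	/*
	 * Example: with the standard 1500-byte MTU, 1500 + ETHER_HDR_LEN +
	 * ETHER_CRC_LEN = 1518 fits in MCLBYTES - 2, so sc_align_tweak is 2
	 * and the 14-byte Ethernet header leaves the IP header 4-byte
	 * aligned.
	 */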
   5710 
   5711 	/* Cancel any pending I/O. */
   5712 	wm_stop_locked(ifp, 0);
   5713 
   5714 	/* update statistics before reset */
   5715 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5716 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5717 
   5718 	/* PCH_SPT hardware workaround */
   5719 	if (sc->sc_type == WM_T_PCH_SPT)
   5720 		wm_flush_desc_rings(sc);
   5721 
   5722 	/* Reset the chip to a known state. */
   5723 	wm_reset(sc);
   5724 
   5725 	/*
   5726 	 * AMT based hardware can now take control from firmware
   5727 	 * Do this after reset.
   5728 	 */
   5729 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5730 		wm_get_hw_control(sc);
   5731 
   5732 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5733 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5734 		wm_legacy_irq_quirk_spt(sc);
   5735 
   5736 	/* Init hardware bits */
   5737 	wm_initialize_hardware_bits(sc);
   5738 
   5739 	/* Reset the PHY. */
   5740 	if (sc->sc_flags & WM_F_HAS_MII)
   5741 		wm_gmii_reset(sc);
   5742 
   5743 	if (sc->sc_type >= WM_T_ICH8) {
   5744 		reg = CSR_READ(sc, WMREG_GCR);
   5745 		/*
   5746 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5747 		 * default after reset.
   5748 		 */
   5749 		if (sc->sc_type == WM_T_ICH8)
   5750 			reg |= GCR_NO_SNOOP_ALL;
   5751 		else
   5752 			reg &= ~GCR_NO_SNOOP_ALL;
   5753 		CSR_WRITE(sc, WMREG_GCR, reg);
   5754 	}
   5755 	if ((sc->sc_type >= WM_T_ICH8)
   5756 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5757 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5758 
   5759 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5760 		reg |= CTRL_EXT_RO_DIS;
   5761 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5762 	}
   5763 
   5764 	/* Calculate (E)ITR value */
   5765 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5766 		/*
   5767 		 * For NEWQUEUE's EITR (except for 82575).
    5768 		 * The 82575's EITR should be set to the same throttling value
    5769 		 * as other old controllers' ITR because the interrupt/sec
    5770 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5771 		 *
    5772 		 * The 82574's EITR should be set to the same value as its ITR.
    5773 		 *
    5774 		 * For N interrupts/sec, set this value to:
    5775 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5776 		 */
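		/* 450 corresponds to 1,000,000 / 450 ~= 2222 interrupts/sec. */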
   5777 		sc->sc_itr_init = 450;
   5778 	} else if (sc->sc_type >= WM_T_82543) {
   5779 		/*
   5780 		 * Set up the interrupt throttling register (units of 256ns)
    5781 		 * Set up the interrupt throttling register (units of 256ns).
    5782 		 * Note that a footnote in Intel's documentation says this
    5783 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    5784 		 * or 10Mbit mode.  Empirically, it appears that this is
    5785 		 * also true for the 1024ns units of the other
   5786 		 * to divide this value by 4 when the link speed is low.
   5787 		 *
   5788 		 * XXX implement this division at link speed change!
   5789 		 */
   5790 
   5791 		/*
   5792 		 * For N interrupts/sec, set this value to:
   5793 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5794 		 * absolute and packet timer values to this value
   5795 		 * divided by 4 to get "simple timer" behavior.
   5796 		 */
   5797 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5798 	}
   5799 
   5800 	error = wm_init_txrx_queues(sc);
   5801 	if (error)
   5802 		goto out;
   5803 
   5804 	/*
   5805 	 * Clear out the VLAN table -- we don't use it (yet).
   5806 	 */
   5807 	CSR_WRITE(sc, WMREG_VET, 0);
   5808 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5809 		trynum = 10; /* Due to hw errata */
   5810 	else
   5811 		trynum = 1;
   5812 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5813 		for (j = 0; j < trynum; j++)
   5814 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5815 
   5816 	/*
   5817 	 * Set up flow-control parameters.
   5818 	 *
   5819 	 * XXX Values could probably stand some tuning.
   5820 	 */
   5821 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5822 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5823 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5824 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5825 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5826 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5827 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5828 	}
   5829 
   5830 	sc->sc_fcrtl = FCRTL_DFLT;
   5831 	if (sc->sc_type < WM_T_82543) {
   5832 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5833 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5834 	} else {
   5835 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5836 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5837 	}
   5838 
   5839 	if (sc->sc_type == WM_T_80003)
   5840 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5841 	else
   5842 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5843 
   5844 	/* Writes the control register. */
   5845 	wm_set_vlan(sc);
   5846 
   5847 	if (sc->sc_flags & WM_F_HAS_MII) {
   5848 		uint16_t kmreg;
   5849 
   5850 		switch (sc->sc_type) {
   5851 		case WM_T_80003:
   5852 		case WM_T_ICH8:
   5853 		case WM_T_ICH9:
   5854 		case WM_T_ICH10:
   5855 		case WM_T_PCH:
   5856 		case WM_T_PCH2:
   5857 		case WM_T_PCH_LPT:
   5858 		case WM_T_PCH_SPT:
   5859 		case WM_T_PCH_CNP:
   5860 			/*
   5861 			 * Set the mac to wait the maximum time between each
   5862 			 * iteration and increase the max iterations when
   5863 			 * polling the phy; this fixes erroneous timeouts at
   5864 			 * 10Mbps.
   5865 			 */
   5866 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5867 			    0xFFFF);
   5868 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5869 			    &kmreg);
   5870 			kmreg |= 0x3F;
   5871 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5872 			    kmreg);
   5873 			break;
   5874 		default:
   5875 			break;
   5876 		}
   5877 
   5878 		if (sc->sc_type == WM_T_80003) {
   5879 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5880 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5881 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5882 
   5883 			/* Bypass RX and TX FIFO's */
   5884 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5885 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5886 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5887 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5888 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5889 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5890 		}
   5891 	}
   5892 #if 0
   5893 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5894 #endif
   5895 
   5896 	/* Set up checksum offload parameters. */
   5897 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5898 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5899 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5900 		reg |= RXCSUM_IPOFL;
   5901 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5902 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5903 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5904 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5905 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5906 
   5907 	/* Set registers about MSI-X */
   5908 	if (wm_is_using_msix(sc)) {
   5909 		uint32_t ivar;
   5910 		struct wm_queue *wmq;
   5911 		int qid, qintr_idx;
   5912 
   5913 		if (sc->sc_type == WM_T_82575) {
   5914 			/* Interrupt control */
   5915 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5916 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5917 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5918 
   5919 			/* TX and RX */
   5920 			for (i = 0; i < sc->sc_nqueues; i++) {
   5921 				wmq = &sc->sc_queue[i];
   5922 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5923 				    EITR_TX_QUEUE(wmq->wmq_id)
   5924 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5925 			}
   5926 			/* Link status */
   5927 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5928 			    EITR_OTHER);
   5929 		} else if (sc->sc_type == WM_T_82574) {
   5930 			/* Interrupt control */
   5931 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5932 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5933 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5934 
   5935 			/*
    5936 			 * Work around an issue with spurious interrupts
    5937 			 * in MSI-X mode.
    5938 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5939 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5940 			 */
   5941 			reg = CSR_READ(sc, WMREG_RFCTL);
   5942 			reg |= WMREG_RFCTL_ACKDIS;
   5943 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5944 
   5945 			ivar = 0;
   5946 			/* TX and RX */
   5947 			for (i = 0; i < sc->sc_nqueues; i++) {
   5948 				wmq = &sc->sc_queue[i];
   5949 				qid = wmq->wmq_id;
   5950 				qintr_idx = wmq->wmq_intr_idx;
   5951 
   5952 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5953 				    IVAR_TX_MASK_Q_82574(qid));
   5954 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5955 				    IVAR_RX_MASK_Q_82574(qid));
   5956 			}
   5957 			/* Link status */
   5958 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5959 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5960 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5961 		} else {
   5962 			/* Interrupt control */
   5963 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5964 			    | GPIE_EIAME | GPIE_PBA);
   5965 
   5966 			switch (sc->sc_type) {
   5967 			case WM_T_82580:
   5968 			case WM_T_I350:
   5969 			case WM_T_I354:
   5970 			case WM_T_I210:
   5971 			case WM_T_I211:
   5972 				/* TX and RX */
   5973 				for (i = 0; i < sc->sc_nqueues; i++) {
   5974 					wmq = &sc->sc_queue[i];
   5975 					qid = wmq->wmq_id;
   5976 					qintr_idx = wmq->wmq_intr_idx;
   5977 
   5978 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5979 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5980 					ivar |= __SHIFTIN((qintr_idx
   5981 						| IVAR_VALID),
   5982 					    IVAR_TX_MASK_Q(qid));
   5983 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5984 					ivar |= __SHIFTIN((qintr_idx
   5985 						| IVAR_VALID),
   5986 					    IVAR_RX_MASK_Q(qid));
   5987 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5988 				}
   5989 				break;
   5990 			case WM_T_82576:
   5991 				/* TX and RX */
   5992 				for (i = 0; i < sc->sc_nqueues; i++) {
   5993 					wmq = &sc->sc_queue[i];
   5994 					qid = wmq->wmq_id;
   5995 					qintr_idx = wmq->wmq_intr_idx;
   5996 
   5997 					ivar = CSR_READ(sc,
   5998 					    WMREG_IVAR_Q_82576(qid));
   5999 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6000 					ivar |= __SHIFTIN((qintr_idx
   6001 						| IVAR_VALID),
   6002 					    IVAR_TX_MASK_Q_82576(qid));
   6003 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6004 					ivar |= __SHIFTIN((qintr_idx
   6005 						| IVAR_VALID),
   6006 					    IVAR_RX_MASK_Q_82576(qid));
   6007 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6008 					    ivar);
   6009 				}
   6010 				break;
   6011 			default:
   6012 				break;
   6013 			}
   6014 
   6015 			/* Link status */
   6016 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6017 			    IVAR_MISC_OTHER);
   6018 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6019 		}
   6020 
   6021 		if (wm_is_using_multiqueue(sc)) {
   6022 			wm_init_rss(sc);
   6023 
   6024 			/*
    6025 			 * NOTE: Receive Full-Packet Checksum Offload is
    6026 			 * mutually exclusive with Multiqueue. However,
    6027 			 * this is not the same as TCP/IP checksums,
    6028 			 * which still work.
    6029 			 */
   6030 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6031 			reg |= RXCSUM_PCSD;
   6032 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6033 		}
   6034 	}
   6035 
   6036 	/* Set up the interrupt registers. */
   6037 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6038 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6039 	    ICR_RXO | ICR_RXT0;
   6040 	if (wm_is_using_msix(sc)) {
   6041 		uint32_t mask;
   6042 		struct wm_queue *wmq;
   6043 
   6044 		switch (sc->sc_type) {
   6045 		case WM_T_82574:
   6046 			mask = 0;
   6047 			for (i = 0; i < sc->sc_nqueues; i++) {
   6048 				wmq = &sc->sc_queue[i];
   6049 				mask |= ICR_TXQ(wmq->wmq_id);
   6050 				mask |= ICR_RXQ(wmq->wmq_id);
   6051 			}
   6052 			mask |= ICR_OTHER;
   6053 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6054 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6055 			break;
   6056 		default:
   6057 			if (sc->sc_type == WM_T_82575) {
   6058 				mask = 0;
   6059 				for (i = 0; i < sc->sc_nqueues; i++) {
   6060 					wmq = &sc->sc_queue[i];
   6061 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6062 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6063 				}
   6064 				mask |= EITR_OTHER;
   6065 			} else {
   6066 				mask = 0;
   6067 				for (i = 0; i < sc->sc_nqueues; i++) {
   6068 					wmq = &sc->sc_queue[i];
   6069 					mask |= 1 << wmq->wmq_intr_idx;
   6070 				}
   6071 				mask |= 1 << sc->sc_link_intr_idx;
   6072 			}
   6073 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6074 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6075 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6076 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6077 			break;
   6078 		}
   6079 	} else
   6080 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6081 
   6082 	/* Set up the inter-packet gap. */
   6083 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6084 
   6085 	if (sc->sc_type >= WM_T_82543) {
   6086 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6087 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6088 			wm_itrs_writereg(sc, wmq);
   6089 		}
   6090 		/*
    6091 		 * Link interrupts occur much less frequently than TX
    6092 		 * and RX interrupts. So, we don't tune the
    6093 		 * EITR(WM_MSIX_LINKINTR_IDX) value like
    6094 		 * FreeBSD's if_igb does.
   6095 		 */
   6096 	}
   6097 
   6098 	/* Set the VLAN ethernetype. */
   6099 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6100 
   6101 	/*
   6102 	 * Set up the transmit control register; we start out with
    6103 	 * a collision distance suitable for FDX, but update it when
   6104 	 * we resolve the media type.
   6105 	 */
   6106 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6107 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6108 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6109 	if (sc->sc_type >= WM_T_82571)
   6110 		sc->sc_tctl |= TCTL_MULR;
   6111 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6112 
   6113 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6114 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6115 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6116 	}
   6117 
   6118 	if (sc->sc_type == WM_T_80003) {
   6119 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6120 		reg &= ~TCTL_EXT_GCEX_MASK;
   6121 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6122 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6123 	}
   6124 
   6125 	/* Set the media. */
   6126 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6127 		goto out;
   6128 
   6129 	/* Configure for OS presence */
   6130 	wm_init_manageability(sc);
   6131 
   6132 	/*
   6133 	 * Set up the receive control register; we actually program the
   6134 	 * register when we set the receive filter. Use multicast address
   6135 	 * offset type 0.
   6136 	 *
   6137 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6138 	 * don't enable that feature.
   6139 	 */
   6140 	sc->sc_mchash_type = 0;
   6141 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6142 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6143 
   6144 	/*
    6145 	 * The 82574 uses the one-buffer extended Rx descriptor.
   6146 	 */
   6147 	if (sc->sc_type == WM_T_82574)
   6148 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6149 
   6150 	/*
   6151 	 * The I350 has a bug where it always strips the CRC whether
    6152 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   6153 	 */
   6154 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6155 	    || (sc->sc_type == WM_T_I210))
   6156 		sc->sc_rctl |= RCTL_SECRC;
   6157 
   6158 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6159 	    && (ifp->if_mtu > ETHERMTU)) {
   6160 		sc->sc_rctl |= RCTL_LPE;
   6161 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6162 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6163 	}
   6164 
   6165 	if (MCLBYTES == 2048)
   6166 		sc->sc_rctl |= RCTL_2k;
   6167 	else {
   6168 		if (sc->sc_type >= WM_T_82543) {
   6169 			switch (MCLBYTES) {
   6170 			case 4096:
   6171 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6172 				break;
   6173 			case 8192:
   6174 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6175 				break;
   6176 			case 16384:
   6177 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6178 				break;
   6179 			default:
   6180 				panic("wm_init: MCLBYTES %d unsupported",
   6181 				    MCLBYTES);
   6182 				break;
   6183 			}
   6184 		} else
   6185 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6186 	}
   6187 
   6188 	/* Enable ECC */
   6189 	switch (sc->sc_type) {
   6190 	case WM_T_82571:
   6191 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6192 		reg |= PBA_ECC_CORR_EN;
   6193 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6194 		break;
   6195 	case WM_T_PCH_LPT:
   6196 	case WM_T_PCH_SPT:
   6197 	case WM_T_PCH_CNP:
   6198 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6199 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6200 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6201 
   6202 		sc->sc_ctrl |= CTRL_MEHE;
   6203 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6204 		break;
   6205 	default:
   6206 		break;
   6207 	}
   6208 
   6209 	/*
   6210 	 * Set the receive filter.
   6211 	 *
   6212 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6213 	 * the setting of RCTL.EN in wm_set_filter()
   6214 	 */
   6215 	wm_set_filter(sc);
   6216 
    6217 	/* On 82575 and later, set RDT only if RX is enabled. */
   6218 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6219 		int qidx;
   6220 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6221 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6222 			for (i = 0; i < WM_NRXDESC; i++) {
   6223 				mutex_enter(rxq->rxq_lock);
   6224 				wm_init_rxdesc(rxq, i);
   6225 				mutex_exit(rxq->rxq_lock);
   6226 
   6227 			}
   6228 		}
   6229 	}
   6230 
   6231 	wm_unset_stopping_flags(sc);
   6232 
   6233 	/* Start the one second link check clock. */
   6234 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6235 
   6236 	/* ...all done! */
   6237 	ifp->if_flags |= IFF_RUNNING;
   6238 	ifp->if_flags &= ~IFF_OACTIVE;
   6239 
   6240  out:
   6241 	/* Save last flags for the callback */
   6242 	sc->sc_if_flags = ifp->if_flags;
   6243 	sc->sc_ec_capenable = ec->ec_capenable;
   6244 	if (error)
   6245 		log(LOG_ERR, "%s: interface not running\n",
   6246 		    device_xname(sc->sc_dev));
   6247 	return error;
   6248 }
   6249 
   6250 /*
   6251  * wm_stop:		[ifnet interface function]
   6252  *
   6253  *	Stop transmission on the interface.
   6254  */
   6255 static void
   6256 wm_stop(struct ifnet *ifp, int disable)
   6257 {
   6258 	struct wm_softc *sc = ifp->if_softc;
   6259 
   6260 	WM_CORE_LOCK(sc);
   6261 	wm_stop_locked(ifp, disable);
   6262 	WM_CORE_UNLOCK(sc);
   6263 }
   6264 
   6265 static void
   6266 wm_stop_locked(struct ifnet *ifp, int disable)
   6267 {
   6268 	struct wm_softc *sc = ifp->if_softc;
   6269 	struct wm_txsoft *txs;
   6270 	int i, qidx;
   6271 
   6272 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6273 		device_xname(sc->sc_dev), __func__));
   6274 	KASSERT(WM_CORE_LOCKED(sc));
   6275 
   6276 	wm_set_stopping_flags(sc);
   6277 
   6278 	/* Stop the one second clock. */
   6279 	callout_stop(&sc->sc_tick_ch);
   6280 
   6281 	/* Stop the 82547 Tx FIFO stall check timer. */
   6282 	if (sc->sc_type == WM_T_82547)
   6283 		callout_stop(&sc->sc_txfifo_ch);
   6284 
   6285 	if (sc->sc_flags & WM_F_HAS_MII) {
   6286 		/* Down the MII. */
   6287 		mii_down(&sc->sc_mii);
   6288 	} else {
   6289 #if 0
   6290 		/* Should we clear PHY's status properly? */
   6291 		wm_reset(sc);
   6292 #endif
   6293 	}
   6294 
   6295 	/* Stop the transmit and receive processes. */
   6296 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6297 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6298 	sc->sc_rctl &= ~RCTL_EN;
   6299 
   6300 	/*
   6301 	 * Clear the interrupt mask to ensure the device cannot assert its
   6302 	 * interrupt line.
   6303 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6304 	 * service any currently pending or shared interrupt.
   6305 	 */
   6306 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6307 	sc->sc_icr = 0;
   6308 	if (wm_is_using_msix(sc)) {
   6309 		if (sc->sc_type != WM_T_82574) {
   6310 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6311 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6312 		} else
   6313 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6314 	}
   6315 
   6316 	/* Release any queued transmit buffers. */
   6317 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6318 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6319 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6320 		mutex_enter(txq->txq_lock);
   6321 		txq->txq_sending = false; /* ensure watchdog disabled */
   6322 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6323 			txs = &txq->txq_soft[i];
   6324 			if (txs->txs_mbuf != NULL) {
   6325 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6326 				m_freem(txs->txs_mbuf);
   6327 				txs->txs_mbuf = NULL;
   6328 			}
   6329 		}
   6330 		mutex_exit(txq->txq_lock);
   6331 	}
   6332 
   6333 	/* Mark the interface as down and cancel the watchdog timer. */
   6334 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6335 
   6336 	if (disable) {
   6337 		for (i = 0; i < sc->sc_nqueues; i++) {
   6338 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6339 			mutex_enter(rxq->rxq_lock);
   6340 			wm_rxdrain(rxq);
   6341 			mutex_exit(rxq->rxq_lock);
   6342 		}
   6343 	}
   6344 
   6345 #if 0 /* notyet */
   6346 	if (sc->sc_type >= WM_T_82544)
   6347 		CSR_WRITE(sc, WMREG_WUC, 0);
   6348 #endif
   6349 }
   6350 
   6351 static void
   6352 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6353 {
   6354 	struct mbuf *m;
   6355 	int i;
   6356 
   6357 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6358 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6359 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6360 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6361 		    m->m_data, m->m_len, m->m_flags);
   6362 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6363 	    i, i == 1 ? "" : "s");
   6364 }
   6365 
   6366 /*
   6367  * wm_82547_txfifo_stall:
   6368  *
   6369  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6370  *	reset the FIFO pointers, and restart packet transmission.
   6371  */
   6372 static void
   6373 wm_82547_txfifo_stall(void *arg)
   6374 {
   6375 	struct wm_softc *sc = arg;
   6376 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6377 
   6378 	mutex_enter(txq->txq_lock);
   6379 
   6380 	if (txq->txq_stopping)
   6381 		goto out;
   6382 
   6383 	if (txq->txq_fifo_stall) {
   6384 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6385 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6386 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6387 			/*
   6388 			 * Packets have drained.  Stop transmitter, reset
   6389 			 * FIFO pointers, restart transmitter, and kick
   6390 			 * the packet queue.
   6391 			 */
   6392 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6393 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6394 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6395 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6396 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6397 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6398 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6399 			CSR_WRITE_FLUSH(sc);
   6400 
   6401 			txq->txq_fifo_head = 0;
   6402 			txq->txq_fifo_stall = 0;
   6403 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6404 		} else {
   6405 			/*
   6406 			 * Still waiting for packets to drain; try again in
   6407 			 * another tick.
   6408 			 */
   6409 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6410 		}
   6411 	}
   6412 
   6413 out:
   6414 	mutex_exit(txq->txq_lock);
   6415 }
   6416 
   6417 /*
   6418  * wm_82547_txfifo_bugchk:
   6419  *
   6420  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6421  *	prevent enqueueing a packet that would wrap around the end
    6422 	 *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6423  *
   6424  *	We do this by checking the amount of space before the end
   6425  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6426  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6427  *	the internal FIFO pointers to the beginning, and restart
   6428  *	transmission on the interface.
   6429  */
   6430 #define	WM_FIFO_HDR		0x10
   6431 #define	WM_82547_PAD_LEN	0x3e0
   6432 static int
   6433 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6434 {
   6435 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6436 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6437 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6438 
   6439 	/* Just return if already stalled. */
   6440 	if (txq->txq_fifo_stall)
   6441 		return 1;
   6442 
   6443 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6444 		/* Stall only occurs in half-duplex mode. */
   6445 		goto send_packet;
   6446 	}
   6447 
   6448 	if (len >= WM_82547_PAD_LEN + space) {
   6449 		txq->txq_fifo_stall = 1;
   6450 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6451 		return 1;
   6452 	}
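	/*
	 * Worked example of the check above: a 1514-byte frame gives
	 * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) == 1536.  With
	 * the FIFO head 1000 bytes from the end of the ring (space ==
	 * 1000), "1536 >= 0x3e0 (992) + 1000" is false, so there is no
	 * stall: the packet is sent and the head advances below, modulo
	 * the FIFO size.
	 */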
   6453 
   6454  send_packet:
   6455 	txq->txq_fifo_head += len;
   6456 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6457 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6458 
   6459 	return 0;
   6460 }
   6461 
   6462 static int
   6463 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6464 {
   6465 	int error;
   6466 
   6467 	/*
   6468 	 * Allocate the control data structures, and create and load the
   6469 	 * DMA map for it.
   6470 	 *
   6471 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6472 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6473 	 * both sets within the same 4G segment.
   6474 	 */
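	/*
	 * The (bus_size_t)0x100000000ULL boundary argument passed to
	 * bus_dmamem_alloc() below is what enforces this: bus_dma(9)
	 * guarantees that an allocated segment never crosses a boundary
	 * of that size.
	 */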
   6475 	if (sc->sc_type < WM_T_82544)
   6476 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6477 	else
   6478 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6479 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6480 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6481 	else
   6482 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6483 
   6484 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6485 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6486 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6487 		aprint_error_dev(sc->sc_dev,
   6488 		    "unable to allocate TX control data, error = %d\n",
   6489 		    error);
   6490 		goto fail_0;
   6491 	}
   6492 
   6493 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6494 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6495 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6496 		aprint_error_dev(sc->sc_dev,
   6497 		    "unable to map TX control data, error = %d\n", error);
   6498 		goto fail_1;
   6499 	}
   6500 
   6501 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6502 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6503 		aprint_error_dev(sc->sc_dev,
   6504 		    "unable to create TX control data DMA map, error = %d\n",
   6505 		    error);
   6506 		goto fail_2;
   6507 	}
   6508 
   6509 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6510 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6511 		aprint_error_dev(sc->sc_dev,
   6512 		    "unable to load TX control data DMA map, error = %d\n",
   6513 		    error);
   6514 		goto fail_3;
   6515 	}
   6516 
   6517 	return 0;
   6518 
   6519  fail_3:
   6520 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6521  fail_2:
   6522 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6523 	    WM_TXDESCS_SIZE(txq));
   6524  fail_1:
   6525 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6526  fail_0:
   6527 	return error;
   6528 }
   6529 
   6530 static void
   6531 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6532 {
   6533 
   6534 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6535 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6536 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6537 	    WM_TXDESCS_SIZE(txq));
   6538 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6539 }
   6540 
   6541 static int
   6542 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6543 {
   6544 	int error;
   6545 	size_t rxq_descs_size;
   6546 
   6547 	/*
   6548 	 * Allocate the control data structures, and create and load the
   6549 	 * DMA map for it.
   6550 	 *
   6551 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6552 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6553 	 * both sets within the same 4G segment.
   6554 	 */
   6555 	rxq->rxq_ndesc = WM_NRXDESC;
   6556 	if (sc->sc_type == WM_T_82574)
   6557 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6558 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6559 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6560 	else
   6561 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6562 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
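	/*
	 * All three descriptor formats happen to be 16 bytes on this
	 * hardware, so rxq_descs_size works out the same whichever
	 * branch was taken above.
	 */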
   6563 
   6564 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6565 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6566 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6567 		aprint_error_dev(sc->sc_dev,
   6568 		    "unable to allocate RX control data, error = %d\n",
   6569 		    error);
   6570 		goto fail_0;
   6571 	}
   6572 
   6573 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6574 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6575 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6576 		aprint_error_dev(sc->sc_dev,
   6577 		    "unable to map RX control data, error = %d\n", error);
   6578 		goto fail_1;
   6579 	}
   6580 
   6581 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6582 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6583 		aprint_error_dev(sc->sc_dev,
   6584 		    "unable to create RX control data DMA map, error = %d\n",
   6585 		    error);
   6586 		goto fail_2;
   6587 	}
   6588 
   6589 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6590 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6591 		aprint_error_dev(sc->sc_dev,
   6592 		    "unable to load RX control data DMA map, error = %d\n",
   6593 		    error);
   6594 		goto fail_3;
   6595 	}
   6596 
   6597 	return 0;
   6598 
   6599  fail_3:
   6600 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6601  fail_2:
   6602 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6603 	    rxq_descs_size);
   6604  fail_1:
   6605 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6606  fail_0:
   6607 	return error;
   6608 }
   6609 
   6610 static void
   6611 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6612 {
   6613 
   6614 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6615 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6616 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6617 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6618 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6619 }
   6620 
   6621 
   6622 static int
   6623 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6624 {
   6625 	int i, error;
   6626 
   6627 	/* Create the transmit buffer DMA maps. */
   6628 	WM_TXQUEUELEN(txq) =
   6629 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6630 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6631 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6632 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6633 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6634 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6635 			aprint_error_dev(sc->sc_dev,
   6636 			    "unable to create Tx DMA map %d, error = %d\n",
   6637 			    i, error);
   6638 			goto fail;
   6639 		}
   6640 	}
   6641 
   6642 	return 0;
   6643 
   6644  fail:
   6645 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6646 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6647 			bus_dmamap_destroy(sc->sc_dmat,
   6648 			    txq->txq_soft[i].txs_dmamap);
   6649 	}
   6650 	return error;
   6651 }
   6652 
   6653 static void
   6654 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6655 {
   6656 	int i;
   6657 
   6658 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6659 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6660 			bus_dmamap_destroy(sc->sc_dmat,
   6661 			    txq->txq_soft[i].txs_dmamap);
   6662 	}
   6663 }
   6664 
   6665 static int
   6666 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6667 {
   6668 	int i, error;
   6669 
   6670 	/* Create the receive buffer DMA maps. */
   6671 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6672 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6673 			    MCLBYTES, 0, 0,
   6674 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6675 			aprint_error_dev(sc->sc_dev,
   6676 			    "unable to create Rx DMA map %d error = %d\n",
   6677 			    i, error);
   6678 			goto fail;
   6679 		}
   6680 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6681 	}
   6682 
   6683 	return 0;
   6684 
   6685  fail:
   6686 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6687 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6688 			bus_dmamap_destroy(sc->sc_dmat,
   6689 			    rxq->rxq_soft[i].rxs_dmamap);
   6690 	}
   6691 	return error;
   6692 }
   6693 
   6694 static void
   6695 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6696 {
   6697 	int i;
   6698 
   6699 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6700 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6701 			bus_dmamap_destroy(sc->sc_dmat,
   6702 			    rxq->rxq_soft[i].rxs_dmamap);
   6703 	}
   6704 }
   6705 
   6706 /*
    6707  * wm_alloc_txrx_queues:
    6708  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6709  */
   6710 static int
   6711 wm_alloc_txrx_queues(struct wm_softc *sc)
   6712 {
   6713 	int i, error, tx_done, rx_done;
   6714 
   6715 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6716 	    KM_SLEEP);
   6717 	if (sc->sc_queue == NULL) {
    6718 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6719 		error = ENOMEM;
   6720 		goto fail_0;
   6721 	}
   6722 
   6723 	/*
   6724 	 * For transmission
   6725 	 */
   6726 	error = 0;
   6727 	tx_done = 0;
   6728 	for (i = 0; i < sc->sc_nqueues; i++) {
   6729 #ifdef WM_EVENT_COUNTERS
   6730 		int j;
   6731 		const char *xname;
   6732 #endif
   6733 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6734 		txq->txq_sc = sc;
   6735 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6736 
   6737 		error = wm_alloc_tx_descs(sc, txq);
   6738 		if (error)
   6739 			break;
   6740 		error = wm_alloc_tx_buffer(sc, txq);
   6741 		if (error) {
   6742 			wm_free_tx_descs(sc, txq);
   6743 			break;
   6744 		}
   6745 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6746 		if (txq->txq_interq == NULL) {
   6747 			wm_free_tx_descs(sc, txq);
   6748 			wm_free_tx_buffer(sc, txq);
   6749 			error = ENOMEM;
   6750 			break;
   6751 		}
   6752 
   6753 #ifdef WM_EVENT_COUNTERS
   6754 		xname = device_xname(sc->sc_dev);
   6755 
   6756 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6757 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6758 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6759 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6760 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6761 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6762 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6763 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6764 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6765 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6766 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6767 
   6768 		for (j = 0; j < WM_NTXSEGS; j++) {
   6769 			snprintf(txq->txq_txseg_evcnt_names[j],
   6770 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6771 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6772 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6773 		}
   6774 
   6775 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6776 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6777 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6778 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6779 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6780 #endif /* WM_EVENT_COUNTERS */
   6781 
   6782 		tx_done++;
   6783 	}
   6784 	if (error)
   6785 		goto fail_1;
   6786 
   6787 	/*
    6788 	 * For receive
   6789 	 */
   6790 	error = 0;
   6791 	rx_done = 0;
   6792 	for (i = 0; i < sc->sc_nqueues; i++) {
   6793 #ifdef WM_EVENT_COUNTERS
   6794 		const char *xname;
   6795 #endif
   6796 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6797 		rxq->rxq_sc = sc;
   6798 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6799 
   6800 		error = wm_alloc_rx_descs(sc, rxq);
   6801 		if (error)
   6802 			break;
   6803 
   6804 		error = wm_alloc_rx_buffer(sc, rxq);
   6805 		if (error) {
   6806 			wm_free_rx_descs(sc, rxq);
   6807 			break;
   6808 		}
   6809 
   6810 #ifdef WM_EVENT_COUNTERS
   6811 		xname = device_xname(sc->sc_dev);
   6812 
   6813 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6814 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6815 
   6816 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6817 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6818 #endif /* WM_EVENT_COUNTERS */
   6819 
   6820 		rx_done++;
   6821 	}
   6822 	if (error)
   6823 		goto fail_2;
   6824 
   6825 	return 0;
   6826 
   6827  fail_2:
   6828 	for (i = 0; i < rx_done; i++) {
   6829 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6830 		wm_free_rx_buffer(sc, rxq);
   6831 		wm_free_rx_descs(sc, rxq);
   6832 		if (rxq->rxq_lock)
   6833 			mutex_obj_free(rxq->rxq_lock);
   6834 	}
   6835  fail_1:
   6836 	for (i = 0; i < tx_done; i++) {
   6837 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6838 		pcq_destroy(txq->txq_interq);
   6839 		wm_free_tx_buffer(sc, txq);
   6840 		wm_free_tx_descs(sc, txq);
   6841 		if (txq->txq_lock)
   6842 			mutex_obj_free(txq->txq_lock);
   6843 	}
   6844 
   6845 	kmem_free(sc->sc_queue,
   6846 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6847  fail_0:
   6848 	return error;
   6849 }
   6850 
   6851 /*
    6852  * wm_free_txrx_queues:
    6853  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6854  */
   6855 static void
   6856 wm_free_txrx_queues(struct wm_softc *sc)
   6857 {
   6858 	int i;
   6859 
   6860 	for (i = 0; i < sc->sc_nqueues; i++) {
   6861 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6862 
   6863 #ifdef WM_EVENT_COUNTERS
   6864 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6865 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6866 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6867 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6868 #endif /* WM_EVENT_COUNTERS */
   6869 
   6870 		wm_free_rx_buffer(sc, rxq);
   6871 		wm_free_rx_descs(sc, rxq);
   6872 		if (rxq->rxq_lock)
   6873 			mutex_obj_free(rxq->rxq_lock);
   6874 	}
   6875 
   6876 	for (i = 0; i < sc->sc_nqueues; i++) {
   6877 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6878 		struct mbuf *m;
   6879 #ifdef WM_EVENT_COUNTERS
   6880 		int j;
   6881 
   6882 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6883 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6884 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6885 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6886 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6887 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6888 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6889 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6890 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6891 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6892 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6893 
   6894 		for (j = 0; j < WM_NTXSEGS; j++)
   6895 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6896 
   6897 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6898 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6899 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6900 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6901 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6902 #endif /* WM_EVENT_COUNTERS */
   6903 
   6904 		/* drain txq_interq */
   6905 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6906 			m_freem(m);
   6907 		pcq_destroy(txq->txq_interq);
   6908 
   6909 		wm_free_tx_buffer(sc, txq);
   6910 		wm_free_tx_descs(sc, txq);
   6911 		if (txq->txq_lock)
   6912 			mutex_obj_free(txq->txq_lock);
   6913 	}
   6914 
   6915 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6916 }
   6917 
   6918 static void
   6919 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6920 {
   6921 
   6922 	KASSERT(mutex_owned(txq->txq_lock));
   6923 
   6924 	/* Initialize the transmit descriptor ring. */
   6925 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6926 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6927 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6928 	txq->txq_free = WM_NTXDESC(txq);
   6929 	txq->txq_next = 0;
   6930 }
   6931 
   6932 static void
   6933 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6934     struct wm_txqueue *txq)
   6935 {
   6936 
   6937 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6938 		device_xname(sc->sc_dev), __func__));
   6939 	KASSERT(mutex_owned(txq->txq_lock));
   6940 
   6941 	if (sc->sc_type < WM_T_82543) {
   6942 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6943 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6944 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6945 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6946 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6947 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6948 	} else {
   6949 		int qid = wmq->wmq_id;
   6950 
   6951 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6952 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6953 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6954 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6955 
   6956 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6957 			/*
   6958 			 * Don't write TDT before TCTL.EN is set.
    6959 			 * See the documentation.
   6960 			 */
   6961 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6962 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6963 			    | TXDCTL_WTHRESH(0));
   6964 		else {
   6965 			/* XXX should update with AIM? */
   6966 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6967 			if (sc->sc_type >= WM_T_82540) {
    6968 				/* Should be the same value as TIDV. */
   6969 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6970 			}
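			/*
			 * Note: wmq_itr is kept in the ITR register's
			 * units (256 ns per tick on this family), while
			 * TIDV and TADV take 1.024 us units, so the
			 * "/ 4" above converts between the two
			 * (1.024 us / 256 ns == 4).
			 */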
   6971 
   6972 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6973 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6974 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6975 		}
   6976 	}
   6977 }
   6978 
   6979 static void
   6980 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6981 {
   6982 	int i;
   6983 
   6984 	KASSERT(mutex_owned(txq->txq_lock));
   6985 
   6986 	/* Initialize the transmit job descriptors. */
   6987 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6988 		txq->txq_soft[i].txs_mbuf = NULL;
   6989 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6990 	txq->txq_snext = 0;
   6991 	txq->txq_sdirty = 0;
   6992 }
   6993 
   6994 static void
   6995 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6996     struct wm_txqueue *txq)
   6997 {
   6998 
   6999 	KASSERT(mutex_owned(txq->txq_lock));
   7000 
   7001 	/*
   7002 	 * Set up some register offsets that are different between
   7003 	 * the i82542 and the i82543 and later chips.
   7004 	 */
   7005 	if (sc->sc_type < WM_T_82543)
   7006 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7007 	else
   7008 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7009 
   7010 	wm_init_tx_descs(sc, txq);
   7011 	wm_init_tx_regs(sc, wmq, txq);
   7012 	wm_init_tx_buffer(sc, txq);
   7013 
   7014 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7015 	txq->txq_sending = false;
   7016 }
   7017 
   7018 static void
   7019 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7020     struct wm_rxqueue *rxq)
   7021 {
   7022 
   7023 	KASSERT(mutex_owned(rxq->rxq_lock));
   7024 
   7025 	/*
   7026 	 * Initialize the receive descriptor and receive job
   7027 	 * descriptor rings.
   7028 	 */
   7029 	if (sc->sc_type < WM_T_82543) {
   7030 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7031 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7032 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7033 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7034 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7035 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7036 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7037 
   7038 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7039 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7040 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7041 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7042 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7043 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7044 	} else {
   7045 		int qid = wmq->wmq_id;
   7046 
   7047 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7048 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7049 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7050 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7051 
   7052 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7053 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7054 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7055 
    7056 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported for now. */
   7057 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7058 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
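			/*
			 * BSIZEPKT is in (1 << SRRCTL_BSIZEPKT_SHIFT)
			 * byte units -- 1K with the usual shift of 10 --
			 * hence the multiple-of-1K check above; e.g.
			 * MCLBYTES == 2048 programs the field with 2.
			 */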
   7059 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7060 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7061 			    | RXDCTL_WTHRESH(1));
   7062 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7063 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7064 		} else {
   7065 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7066 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7067 			/* XXX should update with AIM? */
   7068 			CSR_WRITE(sc, WMREG_RDTR,
   7069 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7070 			/* MUST be same */
   7071 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7072 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7073 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7074 		}
   7075 	}
   7076 }
   7077 
   7078 static int
   7079 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7080 {
   7081 	struct wm_rxsoft *rxs;
   7082 	int error, i;
   7083 
   7084 	KASSERT(mutex_owned(rxq->rxq_lock));
   7085 
   7086 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7087 		rxs = &rxq->rxq_soft[i];
   7088 		if (rxs->rxs_mbuf == NULL) {
   7089 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7090 				log(LOG_ERR, "%s: unable to allocate or map "
   7091 				    "rx buffer %d, error = %d\n",
   7092 				    device_xname(sc->sc_dev), i, error);
   7093 				/*
   7094 				 * XXX Should attempt to run with fewer receive
   7095 				 * XXX buffers instead of just failing.
   7096 				 */
   7097 				wm_rxdrain(rxq);
   7098 				return ENOMEM;
   7099 			}
   7100 		} else {
   7101 			/*
   7102 			 * For 82575 and 82576, the RX descriptors must be
   7103 			 * initialized after the setting of RCTL.EN in
   7104 			 * wm_set_filter()
   7105 			 */
   7106 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7107 				wm_init_rxdesc(rxq, i);
   7108 		}
   7109 	}
   7110 	rxq->rxq_ptr = 0;
   7111 	rxq->rxq_discard = 0;
   7112 	WM_RXCHAIN_RESET(rxq);
   7113 
   7114 	return 0;
   7115 }
   7116 
   7117 static int
   7118 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7119     struct wm_rxqueue *rxq)
   7120 {
   7121 
   7122 	KASSERT(mutex_owned(rxq->rxq_lock));
   7123 
   7124 	/*
   7125 	 * Set up some register offsets that are different between
   7126 	 * the i82542 and the i82543 and later chips.
   7127 	 */
   7128 	if (sc->sc_type < WM_T_82543)
   7129 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7130 	else
   7131 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7132 
   7133 	wm_init_rx_regs(sc, wmq, rxq);
   7134 	return wm_init_rx_buffer(sc, rxq);
   7135 }
   7136 
   7137 /*
    7138  * wm_init_txrx_queues:
    7139  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7140  */
   7141 static int
   7142 wm_init_txrx_queues(struct wm_softc *sc)
   7143 {
   7144 	int i, error = 0;
   7145 
   7146 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7147 		device_xname(sc->sc_dev), __func__));
   7148 
   7149 	for (i = 0; i < sc->sc_nqueues; i++) {
   7150 		struct wm_queue *wmq = &sc->sc_queue[i];
   7151 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7152 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7153 
   7154 		/*
   7155 		 * TODO
    7156 		 * Currently, we use a constant ITR value instead of AIM.
    7157 		 * Furthermore, the interrupt interval used for multiqueue
    7158 		 * (which uses polling mode) is less than the default value.
    7159 		 * More tuning and AIM support are required.
   7160 		 */
   7161 		if (wm_is_using_multiqueue(sc))
   7162 			wmq->wmq_itr = 50;
   7163 		else
   7164 			wmq->wmq_itr = sc->sc_itr_init;
   7165 		wmq->wmq_set_itr = true;
   7166 
   7167 		mutex_enter(txq->txq_lock);
   7168 		wm_init_tx_queue(sc, wmq, txq);
   7169 		mutex_exit(txq->txq_lock);
   7170 
   7171 		mutex_enter(rxq->rxq_lock);
   7172 		error = wm_init_rx_queue(sc, wmq, rxq);
   7173 		mutex_exit(rxq->rxq_lock);
   7174 		if (error)
   7175 			break;
   7176 	}
   7177 
   7178 	return error;
   7179 }
   7180 
   7181 /*
   7182  * wm_tx_offload:
   7183  *
   7184  *	Set up TCP/IP checksumming parameters for the
   7185  *	specified packet.
   7186  */
   7187 static int
   7188 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7189     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7190 {
   7191 	struct mbuf *m0 = txs->txs_mbuf;
   7192 	struct livengood_tcpip_ctxdesc *t;
   7193 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7194 	uint32_t ipcse;
   7195 	struct ether_header *eh;
   7196 	int offset, iphl;
   7197 	uint8_t fields;
   7198 
   7199 	/*
   7200 	 * XXX It would be nice if the mbuf pkthdr had offset
   7201 	 * fields for the protocol headers.
   7202 	 */
   7203 
   7204 	eh = mtod(m0, struct ether_header *);
   7205 	switch (htons(eh->ether_type)) {
   7206 	case ETHERTYPE_IP:
   7207 	case ETHERTYPE_IPV6:
   7208 		offset = ETHER_HDR_LEN;
   7209 		break;
   7210 
   7211 	case ETHERTYPE_VLAN:
   7212 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7213 		break;
   7214 
   7215 	default:
   7216 		/*
   7217 		 * Don't support this protocol or encapsulation.
   7218 		 */
   7219 		*fieldsp = 0;
   7220 		*cmdp = 0;
   7221 		return 0;
   7222 	}
   7223 
   7224 	if ((m0->m_pkthdr.csum_flags &
   7225 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7226 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7227 	} else
   7228 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7229 
   7230 	ipcse = offset + iphl - 1;
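	/*
	 * E.g. for a plain IPv4 frame, offset == ETHER_HDR_LEN (14) and
	 * iphl == 20, so ipcse == 33: the offset of the last byte the
	 * IP checksum engine should cover.
	 */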
   7231 
   7232 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7233 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7234 	seg = 0;
   7235 	fields = 0;
   7236 
   7237 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7238 		int hlen = offset + iphl;
   7239 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7240 
   7241 		if (__predict_false(m0->m_len <
   7242 				    (hlen + sizeof(struct tcphdr)))) {
   7243 			/*
   7244 			 * TCP/IP headers are not in the first mbuf; we need
   7245 			 * to do this the slow and painful way. Let's just
   7246 			 * hope this doesn't happen very often.
   7247 			 */
   7248 			struct tcphdr th;
   7249 
   7250 			WM_Q_EVCNT_INCR(txq, tsopain);
   7251 
   7252 			m_copydata(m0, hlen, sizeof(th), &th);
   7253 			if (v4) {
   7254 				struct ip ip;
   7255 
   7256 				m_copydata(m0, offset, sizeof(ip), &ip);
   7257 				ip.ip_len = 0;
   7258 				m_copyback(m0,
   7259 				    offset + offsetof(struct ip, ip_len),
   7260 				    sizeof(ip.ip_len), &ip.ip_len);
   7261 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7262 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7263 			} else {
   7264 				struct ip6_hdr ip6;
   7265 
   7266 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7267 				ip6.ip6_plen = 0;
   7268 				m_copyback(m0,
   7269 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7270 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7271 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7272 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7273 			}
   7274 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7275 			    sizeof(th.th_sum), &th.th_sum);
   7276 
   7277 			hlen += th.th_off << 2;
   7278 		} else {
   7279 			/*
   7280 			 * TCP/IP headers are in the first mbuf; we can do
   7281 			 * this the easy way.
   7282 			 */
   7283 			struct tcphdr *th;
   7284 
   7285 			if (v4) {
   7286 				struct ip *ip =
   7287 				    (void *)(mtod(m0, char *) + offset);
   7288 				th = (void *)(mtod(m0, char *) + hlen);
   7289 
   7290 				ip->ip_len = 0;
   7291 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7292 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7293 			} else {
   7294 				struct ip6_hdr *ip6 =
   7295 				    (void *)(mtod(m0, char *) + offset);
   7296 				th = (void *)(mtod(m0, char *) + hlen);
   7297 
   7298 				ip6->ip6_plen = 0;
   7299 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7300 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7301 			}
   7302 			hlen += th->th_off << 2;
   7303 		}
   7304 
   7305 		if (v4) {
   7306 			WM_Q_EVCNT_INCR(txq, tso);
   7307 			cmdlen |= WTX_TCPIP_CMD_IP;
   7308 		} else {
   7309 			WM_Q_EVCNT_INCR(txq, tso6);
   7310 			ipcse = 0;
   7311 		}
   7312 		cmd |= WTX_TCPIP_CMD_TSE;
   7313 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7314 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7315 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7316 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7317 	}
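	/*
	 * Both paths above zero the IP total-length field and seed the
	 * TCP checksum with a pseudo-header sum that excludes the
	 * length; the chip then fills in per-segment lengths and
	 * checksums for each frame it carves out of the TSO payload.
	 */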
   7318 
   7319 	/*
   7320 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7321 	 * offload feature, if we load the context descriptor, we
   7322 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7323 	 */
   7324 
   7325 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7326 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7327 	    WTX_TCPIP_IPCSE(ipcse);
   7328 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7329 		WM_Q_EVCNT_INCR(txq, ipsum);
   7330 		fields |= WTX_IXSM;
   7331 	}
   7332 
   7333 	offset += iphl;
   7334 
   7335 	if (m0->m_pkthdr.csum_flags &
   7336 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7337 		WM_Q_EVCNT_INCR(txq, tusum);
   7338 		fields |= WTX_TXSM;
   7339 		tucs = WTX_TCPIP_TUCSS(offset) |
   7340 		    WTX_TCPIP_TUCSO(offset +
   7341 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7342 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7343 	} else if ((m0->m_pkthdr.csum_flags &
   7344 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7345 		WM_Q_EVCNT_INCR(txq, tusum6);
   7346 		fields |= WTX_TXSM;
   7347 		tucs = WTX_TCPIP_TUCSS(offset) |
   7348 		    WTX_TCPIP_TUCSO(offset +
   7349 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7350 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7351 	} else {
   7352 		/* Just initialize it to a valid TCP context. */
   7353 		tucs = WTX_TCPIP_TUCSS(offset) |
   7354 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7355 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7356 	}
   7357 
   7358 	/*
    7359 	 * We don't have to write a context descriptor for every packet,
    7360 	 * except on the 82574: the 82574 requires a context descriptor
    7361 	 * for every packet when two descriptor queues are used.
    7362 	 * Writing a context descriptor for every packet adds overhead,
    7363 	 * but it does not cause problems.
   7364 	 */
   7365 	/* Fill in the context descriptor. */
   7366 	t = (struct livengood_tcpip_ctxdesc *)
   7367 	    &txq->txq_descs[txq->txq_next];
   7368 	t->tcpip_ipcs = htole32(ipcs);
   7369 	t->tcpip_tucs = htole32(tucs);
   7370 	t->tcpip_cmdlen = htole32(cmdlen);
   7371 	t->tcpip_seg = htole32(seg);
   7372 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7373 
   7374 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7375 	txs->txs_ndesc++;
   7376 
   7377 	*cmdp = cmd;
   7378 	*fieldsp = fields;
   7379 
   7380 	return 0;
   7381 }
   7382 
   7383 static inline int
   7384 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7385 {
   7386 	struct wm_softc *sc = ifp->if_softc;
   7387 	u_int cpuid = cpu_index(curcpu());
   7388 
   7389 	/*
    7390 	 * Currently, a simple distribution strategy.
    7391 	 * TODO:
    7392 	 * Distribute by flowid (RSS hash value).
   7393 	 */
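	/*
	 * E.g. with ncpu == 8, sc_affinity_offset == 0 and
	 * sc_nqueues == 4, CPU index 5 yields
	 * ((5 + 8 - 0) % 8) % 4 == 1, i.e. Tx queue 1.
	 */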
   7394 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7395 }
   7396 
   7397 /*
   7398  * wm_start:		[ifnet interface function]
   7399  *
   7400  *	Start packet transmission on the interface.
   7401  */
   7402 static void
   7403 wm_start(struct ifnet *ifp)
   7404 {
   7405 	struct wm_softc *sc = ifp->if_softc;
   7406 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7407 
   7408 #ifdef WM_MPSAFE
   7409 	KASSERT(if_is_mpsafe(ifp));
   7410 #endif
   7411 	/*
   7412 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7413 	 */
   7414 
   7415 	mutex_enter(txq->txq_lock);
   7416 	if (!txq->txq_stopping)
   7417 		wm_start_locked(ifp);
   7418 	mutex_exit(txq->txq_lock);
   7419 }
   7420 
   7421 static void
   7422 wm_start_locked(struct ifnet *ifp)
   7423 {
   7424 	struct wm_softc *sc = ifp->if_softc;
   7425 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7426 
   7427 	wm_send_common_locked(ifp, txq, false);
   7428 }
   7429 
   7430 static int
   7431 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7432 {
   7433 	int qid;
   7434 	struct wm_softc *sc = ifp->if_softc;
   7435 	struct wm_txqueue *txq;
   7436 
   7437 	qid = wm_select_txqueue(ifp, m);
   7438 	txq = &sc->sc_queue[qid].wmq_txq;
   7439 
   7440 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7441 		m_freem(m);
   7442 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7443 		return ENOBUFS;
   7444 	}
   7445 
   7446 	/*
   7447 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7448 	 */
   7449 	ifp->if_obytes += m->m_pkthdr.len;
   7450 	if (m->m_flags & M_MCAST)
   7451 		ifp->if_omcasts++;
   7452 
   7453 	if (mutex_tryenter(txq->txq_lock)) {
   7454 		if (!txq->txq_stopping)
   7455 			wm_transmit_locked(ifp, txq);
   7456 		mutex_exit(txq->txq_lock);
   7457 	}
   7458 
   7459 	return 0;
   7460 }
   7461 
   7462 static void
   7463 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7464 {
   7465 
   7466 	wm_send_common_locked(ifp, txq, true);
   7467 }
   7468 
   7469 static void
   7470 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7471     bool is_transmit)
   7472 {
   7473 	struct wm_softc *sc = ifp->if_softc;
   7474 	struct mbuf *m0;
   7475 	struct wm_txsoft *txs;
   7476 	bus_dmamap_t dmamap;
   7477 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7478 	bus_addr_t curaddr;
   7479 	bus_size_t seglen, curlen;
   7480 	uint32_t cksumcmd;
   7481 	uint8_t cksumfields;
   7482 	bool remap = true;
   7483 
   7484 	KASSERT(mutex_owned(txq->txq_lock));
   7485 
   7486 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7487 		return;
   7488 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7489 		return;
   7490 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7491 		return;
   7492 
   7493 	/* Remember the previous number of free descriptors. */
   7494 	ofree = txq->txq_free;
   7495 
   7496 	/*
   7497 	 * Loop through the send queue, setting up transmit descriptors
   7498 	 * until we drain the queue, or use up all available transmit
   7499 	 * descriptors.
   7500 	 */
   7501 	for (;;) {
   7502 		m0 = NULL;
   7503 
   7504 		/* Get a work queue entry. */
   7505 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7506 			wm_txeof(txq, UINT_MAX);
   7507 			if (txq->txq_sfree == 0) {
   7508 				DPRINTF(WM_DEBUG_TX,
   7509 				    ("%s: TX: no free job descriptors\n",
   7510 					device_xname(sc->sc_dev)));
   7511 				WM_Q_EVCNT_INCR(txq, txsstall);
   7512 				break;
   7513 			}
   7514 		}
   7515 
   7516 		/* Grab a packet off the queue. */
   7517 		if (is_transmit)
   7518 			m0 = pcq_get(txq->txq_interq);
   7519 		else
   7520 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7521 		if (m0 == NULL)
   7522 			break;
   7523 
   7524 		DPRINTF(WM_DEBUG_TX,
   7525 		    ("%s: TX: have packet to transmit: %p\n",
   7526 			device_xname(sc->sc_dev), m0));
   7527 
   7528 		txs = &txq->txq_soft[txq->txq_snext];
   7529 		dmamap = txs->txs_dmamap;
   7530 
   7531 		use_tso = (m0->m_pkthdr.csum_flags &
   7532 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7533 
   7534 		/*
   7535 		 * So says the Linux driver:
   7536 		 * The controller does a simple calculation to make sure
   7537 		 * there is enough room in the FIFO before initiating the
   7538 		 * DMA for each buffer. The calc is:
   7539 		 *	4 = ceil(buffer len / MSS)
   7540 		 * To make sure we don't overrun the FIFO, adjust the max
   7541 		 * buffer len if the MSS drops.
   7542 		 */
   7543 		dmamap->dm_maxsegsz =
   7544 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7545 		    ? m0->m_pkthdr.segsz << 2
   7546 		    : WTX_MAX_LEN;
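		/*
		 * E.g. with an MSS of 1460, dm_maxsegsz becomes
		 * 1460 << 2 == 5840, so a single DMA segment covers at
		 * most ceil(5840 / 1460) == 4 MSS-sized chunks, matching
		 * the controller's FIFO calculation described above.
		 */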
   7547 
   7548 		/*
   7549 		 * Load the DMA map.  If this fails, the packet either
   7550 		 * didn't fit in the allotted number of segments, or we
   7551 		 * were short on resources.  For the too-many-segments
   7552 		 * case, we simply report an error and drop the packet,
   7553 		 * since we can't sanely copy a jumbo packet to a single
   7554 		 * buffer.
   7555 		 */
   7556 retry:
   7557 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7558 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7559 		if (__predict_false(error)) {
   7560 			if (error == EFBIG) {
   7561 				if (remap == true) {
   7562 					struct mbuf *m;
   7563 
   7564 					remap = false;
   7565 					m = m_defrag(m0, M_NOWAIT);
   7566 					if (m != NULL) {
   7567 						WM_Q_EVCNT_INCR(txq, defrag);
   7568 						m0 = m;
   7569 						goto retry;
   7570 					}
   7571 				}
   7572 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7573 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7574 				    "DMA segments, dropping...\n",
   7575 				    device_xname(sc->sc_dev));
   7576 				wm_dump_mbuf_chain(sc, m0);
   7577 				m_freem(m0);
   7578 				continue;
   7579 			}
    7580 			/* Short on resources, just stop for now. */
   7581 			DPRINTF(WM_DEBUG_TX,
   7582 			    ("%s: TX: dmamap load failed: %d\n",
   7583 				device_xname(sc->sc_dev), error));
   7584 			break;
   7585 		}
   7586 
   7587 		segs_needed = dmamap->dm_nsegs;
   7588 		if (use_tso) {
   7589 			/* For sentinel descriptor; see below. */
   7590 			segs_needed++;
   7591 		}
   7592 
   7593 		/*
   7594 		 * Ensure we have enough descriptors free to describe
   7595 		 * the packet. Note, we always reserve one descriptor
   7596 		 * at the end of the ring due to the semantics of the
   7597 		 * TDT register, plus one more in the event we need
   7598 		 * to load offload context.
   7599 		 */
   7600 		if (segs_needed > txq->txq_free - 2) {
   7601 			/*
   7602 			 * Not enough free descriptors to transmit this
   7603 			 * packet.  We haven't committed anything yet,
   7604 			 * so just unload the DMA map, put the packet
   7605 			 * pack on the queue, and punt. Notify the upper
   7606 			 * layer that there are no more slots left.
   7607 			 */
   7608 			DPRINTF(WM_DEBUG_TX,
   7609 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7610 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7611 				segs_needed, txq->txq_free - 1));
   7612 			if (!is_transmit)
   7613 				ifp->if_flags |= IFF_OACTIVE;
   7614 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7615 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7616 			WM_Q_EVCNT_INCR(txq, txdstall);
   7617 			break;
   7618 		}
   7619 
   7620 		/*
   7621 		 * Check for 82547 Tx FIFO bug. We need to do this
   7622 		 * once we know we can transmit the packet, since we
   7623 		 * do some internal FIFO space accounting here.
   7624 		 */
   7625 		if (sc->sc_type == WM_T_82547 &&
   7626 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7627 			DPRINTF(WM_DEBUG_TX,
   7628 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7629 				device_xname(sc->sc_dev)));
   7630 			if (!is_transmit)
   7631 				ifp->if_flags |= IFF_OACTIVE;
   7632 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7633 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7634 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7635 			break;
   7636 		}
   7637 
   7638 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7639 
   7640 		DPRINTF(WM_DEBUG_TX,
   7641 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7642 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7643 
   7644 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7645 
   7646 		/*
   7647 		 * Store a pointer to the packet so that we can free it
   7648 		 * later.
   7649 		 *
   7650 		 * Initially, we consider the number of descriptors the
   7651 		 * packet uses the number of DMA segments.  This may be
   7652 		 * incremented by 1 if we do checksum offload (a descriptor
   7653 		 * is used to set the checksum context).
   7654 		 */
   7655 		txs->txs_mbuf = m0;
   7656 		txs->txs_firstdesc = txq->txq_next;
   7657 		txs->txs_ndesc = segs_needed;
   7658 
   7659 		/* Set up offload parameters for this packet. */
   7660 		if (m0->m_pkthdr.csum_flags &
   7661 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7662 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7663 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7664 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7665 					  &cksumfields) != 0) {
   7666 				/* Error message already displayed. */
   7667 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7668 				continue;
   7669 			}
   7670 		} else {
   7671 			cksumcmd = 0;
   7672 			cksumfields = 0;
   7673 		}
   7674 
   7675 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7676 
   7677 		/* Sync the DMA map. */
   7678 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7679 		    BUS_DMASYNC_PREWRITE);
   7680 
   7681 		/* Initialize the transmit descriptor. */
   7682 		for (nexttx = txq->txq_next, seg = 0;
   7683 		     seg < dmamap->dm_nsegs; seg++) {
   7684 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7685 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7686 			     seglen != 0;
   7687 			     curaddr += curlen, seglen -= curlen,
   7688 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7689 				curlen = seglen;
   7690 
   7691 				/*
   7692 				 * So says the Linux driver:
   7693 				 * Work around for premature descriptor
   7694 				 * write-backs in TSO mode.  Append a
   7695 				 * 4-byte sentinel descriptor.
   7696 				 */
   7697 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7698 				    curlen > 8)
   7699 					curlen -= 4;
   7700 
   7701 				wm_set_dma_addr(
   7702 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7703 				txq->txq_descs[nexttx].wtx_cmdlen
   7704 				    = htole32(cksumcmd | curlen);
   7705 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7706 				    = 0;
   7707 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7708 				    = cksumfields;
    7709 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7710 				lasttx = nexttx;
   7711 
   7712 				DPRINTF(WM_DEBUG_TX,
   7713 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7714 					"len %#04zx\n",
   7715 					device_xname(sc->sc_dev), nexttx,
   7716 					(uint64_t)curaddr, curlen));
   7717 			}
   7718 		}
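		/*
		 * Note: trimming curlen by 4 in the TSO case above
		 * leaves 4 bytes in seglen, so the inner loop runs once
		 * more and emits the 4-byte sentinel descriptor that
		 * segs_needed was bumped for earlier.
		 */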
   7719 
   7720 		KASSERT(lasttx != -1);
   7721 
   7722 		/*
   7723 		 * Set up the command byte on the last descriptor of
   7724 		 * the packet. If we're in the interrupt delay window,
   7725 		 * delay the interrupt.
   7726 		 */
   7727 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7728 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7729 
   7730 		/*
   7731 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7732 		 * up the descriptor to encapsulate the packet for us.
   7733 		 *
   7734 		 * This is only valid on the last descriptor of the packet.
   7735 		 */
   7736 		if (vlan_has_tag(m0)) {
   7737 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7738 			    htole32(WTX_CMD_VLE);
   7739 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7740 			    = htole16(vlan_get_tag(m0));
   7741 		}
   7742 
   7743 		txs->txs_lastdesc = lasttx;
   7744 
   7745 		DPRINTF(WM_DEBUG_TX,
   7746 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7747 			device_xname(sc->sc_dev),
   7748 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7749 
   7750 		/* Sync the descriptors we're using. */
   7751 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7752 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7753 
   7754 		/* Give the packet to the chip. */
   7755 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7756 
   7757 		DPRINTF(WM_DEBUG_TX,
   7758 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7759 
   7760 		DPRINTF(WM_DEBUG_TX,
   7761 		    ("%s: TX: finished transmitting packet, job %d\n",
   7762 			device_xname(sc->sc_dev), txq->txq_snext));
   7763 
   7764 		/* Advance the tx pointer. */
   7765 		txq->txq_free -= txs->txs_ndesc;
   7766 		txq->txq_next = nexttx;
   7767 
   7768 		txq->txq_sfree--;
   7769 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7770 
   7771 		/* Pass the packet to any BPF listeners. */
   7772 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7773 	}
   7774 
   7775 	if (m0 != NULL) {
   7776 		if (!is_transmit)
   7777 			ifp->if_flags |= IFF_OACTIVE;
   7778 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7779 		WM_Q_EVCNT_INCR(txq, descdrop);
   7780 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7781 			__func__));
   7782 		m_freem(m0);
   7783 	}
   7784 
   7785 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7786 		/* No more slots; notify upper layer. */
   7787 		if (!is_transmit)
   7788 			ifp->if_flags |= IFF_OACTIVE;
   7789 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7790 	}
   7791 
   7792 	if (txq->txq_free != ofree) {
   7793 		/* Set a watchdog timer in case the chip flakes out. */
   7794 		txq->txq_lastsent = time_uptime;
   7795 		txq->txq_sending = true;
   7796 	}
   7797 }
   7798 
   7799 /*
   7800  * wm_nq_tx_offload:
   7801  *
   7802  *	Set up TCP/IP checksumming parameters for the
   7803  *	specified packet, for NEWQUEUE devices
   7804  */
   7805 static int
   7806 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7807     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7808 {
   7809 	struct mbuf *m0 = txs->txs_mbuf;
   7810 	uint32_t vl_len, mssidx, cmdc;
   7811 	struct ether_header *eh;
   7812 	int offset, iphl;
   7813 
   7814 	/*
   7815 	 * XXX It would be nice if the mbuf pkthdr had offset
   7816 	 * fields for the protocol headers.
   7817 	 */
   7818 	*cmdlenp = 0;
   7819 	*fieldsp = 0;
   7820 
   7821 	eh = mtod(m0, struct ether_header *);
   7822 	switch (htons(eh->ether_type)) {
   7823 	case ETHERTYPE_IP:
   7824 	case ETHERTYPE_IPV6:
   7825 		offset = ETHER_HDR_LEN;
   7826 		break;
   7827 
   7828 	case ETHERTYPE_VLAN:
   7829 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7830 		break;
   7831 
   7832 	default:
   7833 		/* Don't support this protocol or encapsulation. */
   7834 		*do_csum = false;
   7835 		return 0;
   7836 	}
   7837 	*do_csum = true;
   7838 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7839 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7840 
   7841 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7842 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7843 
   7844 	if ((m0->m_pkthdr.csum_flags &
   7845 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7846 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7847 	} else {
   7848 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7849 	}
   7850 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7851 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
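	/*
	 * At this point vl_len holds the header geometry for the
	 * context descriptor: e.g. an untagged IPv4 frame yields
	 * MACLEN == 14 and IPLEN == 20, each shifted into its
	 * NQTXC_VLLEN_* field; the VLAN tag is OR'd in below when
	 * present.
	 */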
   7852 
   7853 	if (vlan_has_tag(m0)) {
   7854 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7855 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7856 		*cmdlenp |= NQTX_CMD_VLE;
   7857 	}
   7858 
   7859 	mssidx = 0;
   7860 
   7861 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7862 		int hlen = offset + iphl;
   7863 		int tcp_hlen;
   7864 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7865 
   7866 		if (__predict_false(m0->m_len <
   7867 				    (hlen + sizeof(struct tcphdr)))) {
   7868 			/*
   7869 			 * TCP/IP headers are not in the first mbuf; we need
   7870 			 * to do this the slow and painful way. Let's just
   7871 			 * hope this doesn't happen very often.
   7872 			 */
   7873 			struct tcphdr th;
   7874 
   7875 			WM_Q_EVCNT_INCR(txq, tsopain);
   7876 
   7877 			m_copydata(m0, hlen, sizeof(th), &th);
   7878 			if (v4) {
   7879 				struct ip ip;
   7880 
   7881 				m_copydata(m0, offset, sizeof(ip), &ip);
   7882 				ip.ip_len = 0;
   7883 				m_copyback(m0,
   7884 				    offset + offsetof(struct ip, ip_len),
   7885 				    sizeof(ip.ip_len), &ip.ip_len);
   7886 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7887 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7888 			} else {
   7889 				struct ip6_hdr ip6;
   7890 
   7891 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7892 				ip6.ip6_plen = 0;
   7893 				m_copyback(m0,
   7894 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7895 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7896 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7897 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7898 			}
   7899 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7900 			    sizeof(th.th_sum), &th.th_sum);
   7901 
   7902 			tcp_hlen = th.th_off << 2;
   7903 		} else {
   7904 			/*
   7905 			 * TCP/IP headers are in the first mbuf; we can do
   7906 			 * this the easy way.
   7907 			 */
   7908 			struct tcphdr *th;
   7909 
   7910 			if (v4) {
   7911 				struct ip *ip =
   7912 				    (void *)(mtod(m0, char *) + offset);
   7913 				th = (void *)(mtod(m0, char *) + hlen);
   7914 
   7915 				ip->ip_len = 0;
   7916 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7917 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7918 			} else {
   7919 				struct ip6_hdr *ip6 =
   7920 				    (void *)(mtod(m0, char *) + offset);
   7921 				th = (void *)(mtod(m0, char *) + hlen);
   7922 
   7923 				ip6->ip6_plen = 0;
   7924 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7925 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7926 			}
   7927 			tcp_hlen = th->th_off << 2;
   7928 		}
   7929 		hlen += tcp_hlen;
   7930 		*cmdlenp |= NQTX_CMD_TSE;
   7931 
   7932 		if (v4) {
   7933 			WM_Q_EVCNT_INCR(txq, tso);
   7934 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7935 		} else {
   7936 			WM_Q_EVCNT_INCR(txq, tso6);
   7937 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7938 		}
   7939 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7940 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7941 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7942 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7943 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7944 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7945 	} else {
   7946 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7947 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7948 	}
   7949 
   7950 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7951 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7952 		cmdc |= NQTXC_CMD_IP4;
   7953 	}
   7954 
   7955 	if (m0->m_pkthdr.csum_flags &
   7956 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7957 		WM_Q_EVCNT_INCR(txq, tusum);
   7958 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7959 			cmdc |= NQTXC_CMD_TCP;
   7960 		else
   7961 			cmdc |= NQTXC_CMD_UDP;
   7962 
   7963 		cmdc |= NQTXC_CMD_IP4;
   7964 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7965 	}
   7966 	if (m0->m_pkthdr.csum_flags &
   7967 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7968 		WM_Q_EVCNT_INCR(txq, tusum6);
   7969 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7970 			cmdc |= NQTXC_CMD_TCP;
   7971 		else
   7972 			cmdc |= NQTXC_CMD_UDP;
   7973 
   7974 		cmdc |= NQTXC_CMD_IP6;
   7975 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7976 	}
   7977 
    7978 	/*
    7979 	 * We don't have to write a context descriptor for every packet on
    7980 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7981 	 * I210 and I211. Writing one context descriptor per Tx queue is
    7982 	 * enough for these controllers.
    7983 	 * Writing a context descriptor for every packet adds overhead,
    7984 	 * but it does not cause problems.
    7985 	 */
   7986 	/* Fill in the context descriptor. */
   7987 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7988 	    htole32(vl_len);
   7989 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7990 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7991 	    htole32(cmdc);
   7992 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7993 	    htole32(mssidx);
   7994 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7995 	DPRINTF(WM_DEBUG_TX,
   7996 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7997 		txq->txq_next, 0, vl_len));
   7998 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7999 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8000 	txs->txs_ndesc++;
   8001 	return 0;
   8002 }
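
/*
 * Illustrative sketch (not part of the driver): the ones'-complement
 * folding behind the pseudo-header checksum that the TSO path above
 * seeds into th_sum via in_cksum_phdr().  With TSO the length is left
 * out (ip_len/ip6_plen are zeroed above) because the hardware fills in
 * the per-segment length.  The function name below is hypothetical.
 */
#if 0
#include <stdint.h>

static uint16_t
ex_cksum_phdr(uint32_t src, uint32_t dst, uint32_t lenproto)
{
	uint32_t sum = 0;

	/* Sum the 16-bit halves of each pseudo-header field. */
	sum += src & 0xffff;
	sum += (src >> 16) & 0xffff;
	sum += dst & 0xffff;
	sum += (dst >> 16) & 0xffff;
	sum += lenproto & 0xffff;
	sum += (lenproto >> 16) & 0xffff;

	/* Fold the carries back in (ones'-complement addition). */
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);

	return (uint16_t)sum;	/* not inverted: a seed for the hardware */
}
#endif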
   8003 
   8004 /*
   8005  * wm_nq_start:		[ifnet interface function]
   8006  *
   8007  *	Start packet transmission on the interface for NEWQUEUE devices
   8008  */
   8009 static void
   8010 wm_nq_start(struct ifnet *ifp)
   8011 {
   8012 	struct wm_softc *sc = ifp->if_softc;
   8013 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8014 
   8015 #ifdef WM_MPSAFE
   8016 	KASSERT(if_is_mpsafe(ifp));
   8017 #endif
   8018 	/*
   8019 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8020 	 */
   8021 
   8022 	mutex_enter(txq->txq_lock);
   8023 	if (!txq->txq_stopping)
   8024 		wm_nq_start_locked(ifp);
   8025 	mutex_exit(txq->txq_lock);
   8026 }
   8027 
   8028 static void
   8029 wm_nq_start_locked(struct ifnet *ifp)
   8030 {
   8031 	struct wm_softc *sc = ifp->if_softc;
   8032 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8033 
   8034 	wm_nq_send_common_locked(ifp, txq, false);
   8035 }
   8036 
   8037 static int
   8038 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8039 {
   8040 	int qid;
   8041 	struct wm_softc *sc = ifp->if_softc;
   8042 	struct wm_txqueue *txq;
   8043 
   8044 	qid = wm_select_txqueue(ifp, m);
   8045 	txq = &sc->sc_queue[qid].wmq_txq;
   8046 
   8047 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8048 		m_freem(m);
   8049 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8050 		return ENOBUFS;
   8051 	}
   8052 
   8053 	/*
    8054 	 * XXX NOMPSAFE: ifp->if_data should be percpu.
   8055 	 */
   8056 	ifp->if_obytes += m->m_pkthdr.len;
   8057 	if (m->m_flags & M_MCAST)
   8058 		ifp->if_omcasts++;
   8059 
    8060 	/*
    8061 	 * There are two situations in which this mutex_tryenter() can fail
    8062 	 * at run time:
    8063 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8064 	 *     (2) contention with the deferred if_start softint
    8065 	 *         (wm_handle_queue())
    8066 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8067 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8068 	 * The same holds in case (2), so it does not get stuck either.
    8069 	 * See also the sketch after this function.
    8070 	 */
   8071 	if (mutex_tryenter(txq->txq_lock)) {
   8072 		if (!txq->txq_stopping)
   8073 			wm_nq_transmit_locked(ifp, txq);
   8074 		mutex_exit(txq->txq_lock);
   8075 	}
   8076 
   8077 	return 0;
   8078 }
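
/*
 * Illustrative sketch (not part of the driver): the try-lock handoff
 * used by wm_nq_transmit() above.  A producer that loses the
 * mutex_tryenter() race can simply return, because the current lock
 * holder drains the inter-queue before releasing the lock.  The names
 * below (exq_*, pthreads instead of kernel mutexes) are hypothetical.
 */
#if 0
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct exq {
	pthread_mutex_t lock;
	/* plus a lock-free queue, like pcq(9) in the kernel */
};

bool exq_put(struct exq *, void *);	/* lock-free enqueue */
void exq_drain_locked(struct exq *);	/* dequeues everything */

static int
exq_transmit(struct exq *q, void *item)
{

	if (!exq_put(q, item))
		return ENOBUFS;		/* queue full: caller drops */

	if (pthread_mutex_trylock(&q->lock) == 0) {
		exq_drain_locked(q);
		pthread_mutex_unlock(&q->lock);
	}
	/* else: whoever holds q->lock will drain our item. */
	return 0;
}
#endif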
   8079 
   8080 static void
   8081 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8082 {
   8083 
   8084 	wm_nq_send_common_locked(ifp, txq, true);
   8085 }
   8086 
   8087 static void
   8088 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8089     bool is_transmit)
   8090 {
   8091 	struct wm_softc *sc = ifp->if_softc;
   8092 	struct mbuf *m0;
   8093 	struct wm_txsoft *txs;
   8094 	bus_dmamap_t dmamap;
   8095 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8096 	bool do_csum, sent;
   8097 	bool remap = true;
   8098 
   8099 	KASSERT(mutex_owned(txq->txq_lock));
   8100 
   8101 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8102 		return;
   8103 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8104 		return;
   8105 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8106 		return;
   8107 
   8108 	sent = false;
   8109 
   8110 	/*
   8111 	 * Loop through the send queue, setting up transmit descriptors
   8112 	 * until we drain the queue, or use up all available transmit
   8113 	 * descriptors.
   8114 	 */
   8115 	for (;;) {
   8116 		m0 = NULL;
   8117 
   8118 		/* Get a work queue entry. */
   8119 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8120 			wm_txeof(txq, UINT_MAX);
   8121 			if (txq->txq_sfree == 0) {
   8122 				DPRINTF(WM_DEBUG_TX,
   8123 				    ("%s: TX: no free job descriptors\n",
   8124 					device_xname(sc->sc_dev)));
   8125 				WM_Q_EVCNT_INCR(txq, txsstall);
   8126 				break;
   8127 			}
   8128 		}
   8129 
   8130 		/* Grab a packet off the queue. */
   8131 		if (is_transmit)
   8132 			m0 = pcq_get(txq->txq_interq);
   8133 		else
   8134 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8135 		if (m0 == NULL)
   8136 			break;
   8137 
   8138 		DPRINTF(WM_DEBUG_TX,
   8139 		    ("%s: TX: have packet to transmit: %p\n",
   8140 		    device_xname(sc->sc_dev), m0));
   8141 
   8142 		txs = &txq->txq_soft[txq->txq_snext];
   8143 		dmamap = txs->txs_dmamap;
   8144 
   8145 		/*
   8146 		 * Load the DMA map.  If this fails, the packet either
   8147 		 * didn't fit in the allotted number of segments, or we
   8148 		 * were short on resources.  For the too-many-segments
   8149 		 * case, we simply report an error and drop the packet,
   8150 		 * since we can't sanely copy a jumbo packet to a single
   8151 		 * buffer.
   8152 		 */
   8153 retry:
   8154 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8155 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8156 		if (__predict_false(error)) {
   8157 			if (error == EFBIG) {
   8158 				if (remap == true) {
   8159 					struct mbuf *m;
   8160 
   8161 					remap = false;
   8162 					m = m_defrag(m0, M_NOWAIT);
   8163 					if (m != NULL) {
   8164 						WM_Q_EVCNT_INCR(txq, defrag);
   8165 						m0 = m;
   8166 						goto retry;
   8167 					}
   8168 				}
   8169 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8170 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8171 				    "DMA segments, dropping...\n",
   8172 				    device_xname(sc->sc_dev));
   8173 				wm_dump_mbuf_chain(sc, m0);
   8174 				m_freem(m0);
   8175 				continue;
   8176 			}
   8177 			/* Short on resources, just stop for now. */
   8178 			DPRINTF(WM_DEBUG_TX,
   8179 			    ("%s: TX: dmamap load failed: %d\n",
   8180 				device_xname(sc->sc_dev), error));
   8181 			break;
   8182 		}
   8183 
   8184 		segs_needed = dmamap->dm_nsegs;
   8185 
    8186 		/*
    8187 		 * Ensure we have enough descriptors free to describe
    8188 		 * the packet. Note that we always reserve one descriptor
    8189 		 * at the end of the ring due to the semantics of the TDT
    8190 		 * register, plus one more in case we need to load an
    8191 		 * offload context (see the ring sketch after this function).
    8192 		 */
   8193 		if (segs_needed > txq->txq_free - 2) {
    8194 			/*
    8195 			 * Not enough free descriptors to transmit this
    8196 			 * packet.  We haven't committed anything yet,
    8197 			 * so just unload the DMA map, put the packet
    8198 			 * back on the queue, and punt. Notify the upper
    8199 			 * layer that there are no more slots left.
    8200 			 */
   8201 			DPRINTF(WM_DEBUG_TX,
   8202 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8203 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8204 				segs_needed, txq->txq_free - 1));
   8205 			if (!is_transmit)
   8206 				ifp->if_flags |= IFF_OACTIVE;
   8207 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8208 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8209 			WM_Q_EVCNT_INCR(txq, txdstall);
   8210 			break;
   8211 		}
   8212 
   8213 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8214 
   8215 		DPRINTF(WM_DEBUG_TX,
   8216 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8217 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8218 
   8219 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8220 
    8221 		/*
    8222 		 * Store a pointer to the packet so that we can free it
    8223 		 * later.
    8224 		 *
    8225 		 * Initially, we consider the number of descriptors the
    8226 		 * packet uses to be the number of DMA segments. This may
    8227 		 * be incremented by 1 if we do checksum offload (a
    8228 		 * descriptor is used to set the checksum context).
    8229 		 */
   8230 		txs->txs_mbuf = m0;
   8231 		txs->txs_firstdesc = txq->txq_next;
   8232 		txs->txs_ndesc = segs_needed;
   8233 
   8234 		/* Set up offload parameters for this packet. */
   8235 		uint32_t cmdlen, fields, dcmdlen;
   8236 		if (m0->m_pkthdr.csum_flags &
   8237 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8238 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8239 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8240 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8241 			    &do_csum) != 0) {
   8242 				/* Error message already displayed. */
   8243 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8244 				continue;
   8245 			}
   8246 		} else {
   8247 			do_csum = false;
   8248 			cmdlen = 0;
   8249 			fields = 0;
   8250 		}
   8251 
   8252 		/* Sync the DMA map. */
   8253 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8254 		    BUS_DMASYNC_PREWRITE);
   8255 
   8256 		/* Initialize the first transmit descriptor. */
   8257 		nexttx = txq->txq_next;
   8258 		if (!do_csum) {
    8259 			/* Set up a legacy descriptor. */
   8260 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8261 			    dmamap->dm_segs[0].ds_addr);
   8262 			txq->txq_descs[nexttx].wtx_cmdlen =
   8263 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8264 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8265 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8266 			if (vlan_has_tag(m0)) {
   8267 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8268 				    htole32(WTX_CMD_VLE);
   8269 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8270 				    htole16(vlan_get_tag(m0));
   8271 			} else
    8272 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8273 
   8274 			dcmdlen = 0;
   8275 		} else {
    8276 			/* Set up an advanced data descriptor. */
   8277 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8278 			    htole64(dmamap->dm_segs[0].ds_addr);
   8279 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8280 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8281 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8282 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8283 			    htole32(fields);
   8284 			DPRINTF(WM_DEBUG_TX,
   8285 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8286 				device_xname(sc->sc_dev), nexttx,
   8287 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8288 			DPRINTF(WM_DEBUG_TX,
   8289 			    ("\t 0x%08x%08x\n", fields,
   8290 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8291 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8292 		}
   8293 
   8294 		lasttx = nexttx;
   8295 		nexttx = WM_NEXTTX(txq, nexttx);
    8296 		/*
    8297 		 * Fill in the next descriptors. The legacy and advanced
    8298 		 * formats are the same from here on.
    8299 		 */
   8300 		for (seg = 1; seg < dmamap->dm_nsegs;
   8301 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8302 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8303 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8304 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8305 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8306 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8307 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8308 			lasttx = nexttx;
   8309 
   8310 			DPRINTF(WM_DEBUG_TX,
   8311 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8312 				device_xname(sc->sc_dev), nexttx,
   8313 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8314 				dmamap->dm_segs[seg].ds_len));
   8315 		}
   8316 
   8317 		KASSERT(lasttx != -1);
   8318 
   8319 		/*
   8320 		 * Set up the command byte on the last descriptor of
   8321 		 * the packet. If we're in the interrupt delay window,
   8322 		 * delay the interrupt.
   8323 		 */
   8324 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8325 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8326 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8327 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8328 
   8329 		txs->txs_lastdesc = lasttx;
   8330 
   8331 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8332 		    device_xname(sc->sc_dev),
   8333 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8334 
   8335 		/* Sync the descriptors we're using. */
   8336 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8337 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8338 
   8339 		/* Give the packet to the chip. */
   8340 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8341 		sent = true;
   8342 
   8343 		DPRINTF(WM_DEBUG_TX,
   8344 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8345 
   8346 		DPRINTF(WM_DEBUG_TX,
   8347 		    ("%s: TX: finished transmitting packet, job %d\n",
   8348 			device_xname(sc->sc_dev), txq->txq_snext));
   8349 
   8350 		/* Advance the tx pointer. */
   8351 		txq->txq_free -= txs->txs_ndesc;
   8352 		txq->txq_next = nexttx;
   8353 
   8354 		txq->txq_sfree--;
   8355 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8356 
   8357 		/* Pass the packet to any BPF listeners. */
   8358 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8359 	}
   8360 
   8361 	if (m0 != NULL) {
   8362 		if (!is_transmit)
   8363 			ifp->if_flags |= IFF_OACTIVE;
   8364 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8365 		WM_Q_EVCNT_INCR(txq, descdrop);
   8366 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8367 			__func__));
   8368 		m_freem(m0);
   8369 	}
   8370 
   8371 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8372 		/* No more slots; notify upper layer. */
   8373 		if (!is_transmit)
   8374 			ifp->if_flags |= IFF_OACTIVE;
   8375 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8376 	}
   8377 
   8378 	if (sent) {
   8379 		/* Set a watchdog timer in case the chip flakes out. */
   8380 		txq->txq_lastsent = time_uptime;
   8381 		txq->txq_sending = true;
   8382 	}
   8383 }
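
/*
 * Illustrative sketch (not part of the driver) of the ring arithmetic
 * behind the "txq_free - 2" test in wm_nq_send_common_locked() above:
 * one slot is always kept empty so that tail == head never means
 * "full"; otherwise a full ring would be indistinguishable from an
 * empty one.  The names below are hypothetical.
 */
#if 0
/* Slots a producer may fill, excluding the permanently reserved one. */
static unsigned
ring_free(unsigned head, unsigned tail, unsigned size)
{

	return (head + size - tail - 1) % size;
}

/*
 * Usage: with size = 256, head = 10 and tail = 9 the ring is full
 * (ring_free() == 0) even though one descriptor is still unwritten.
 */
#endif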
   8384 
   8385 static void
   8386 wm_deferred_start_locked(struct wm_txqueue *txq)
   8387 {
   8388 	struct wm_softc *sc = txq->txq_sc;
   8389 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8390 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8391 	int qid = wmq->wmq_id;
   8392 
   8393 	KASSERT(mutex_owned(txq->txq_lock));
   8394 
   8395 	if (txq->txq_stopping) {
   8396 		mutex_exit(txq->txq_lock);
   8397 		return;
   8398 	}
   8399 
   8400 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8401 		/* XXX Needed for ALTQ or single-CPU systems */
   8402 		if (qid == 0)
   8403 			wm_nq_start_locked(ifp);
   8404 		wm_nq_transmit_locked(ifp, txq);
   8405 	} else {
    8406 		/* XXX Needed for ALTQ or single-CPU systems */
   8407 		if (qid == 0)
   8408 			wm_start_locked(ifp);
   8409 		wm_transmit_locked(ifp, txq);
   8410 	}
   8411 }
   8412 
   8413 /* Interrupt */
   8414 
   8415 /*
   8416  * wm_txeof:
   8417  *
   8418  *	Helper; handle transmit interrupts.
   8419  */
   8420 static bool
   8421 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8422 {
   8423 	struct wm_softc *sc = txq->txq_sc;
   8424 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8425 	struct wm_txsoft *txs;
   8426 	int count = 0;
   8427 	int i;
   8428 	uint8_t status;
   8429 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8430 	bool more = false;
   8431 
   8432 	KASSERT(mutex_owned(txq->txq_lock));
   8433 
   8434 	if (txq->txq_stopping)
   8435 		return false;
   8436 
   8437 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8438 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8439 	if (wmq->wmq_id == 0)
   8440 		ifp->if_flags &= ~IFF_OACTIVE;
   8441 
   8442 	/*
   8443 	 * Go through the Tx list and free mbufs for those
   8444 	 * frames which have been transmitted.
   8445 	 */
   8446 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8447 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8448 		if (limit-- == 0) {
   8449 			more = true;
   8450 			DPRINTF(WM_DEBUG_TX,
   8451 			    ("%s: TX: loop limited, job %d is not processed\n",
   8452 				device_xname(sc->sc_dev), i));
   8453 			break;
   8454 		}
   8455 
   8456 		txs = &txq->txq_soft[i];
   8457 
   8458 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8459 			device_xname(sc->sc_dev), i));
   8460 
   8461 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8462 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8463 
   8464 		status =
   8465 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8466 		if ((status & WTX_ST_DD) == 0) {
   8467 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8468 			    BUS_DMASYNC_PREREAD);
   8469 			break;
   8470 		}
   8471 
   8472 		count++;
   8473 		DPRINTF(WM_DEBUG_TX,
   8474 		    ("%s: TX: job %d done: descs %d..%d\n",
   8475 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8476 		    txs->txs_lastdesc));
   8477 
   8478 		/*
   8479 		 * XXX We should probably be using the statistics
   8480 		 * XXX registers, but I don't know if they exist
   8481 		 * XXX on chips before the i82544.
   8482 		 */
   8483 
   8484 #ifdef WM_EVENT_COUNTERS
   8485 		if (status & WTX_ST_TU)
   8486 			WM_Q_EVCNT_INCR(txq, underrun);
   8487 #endif /* WM_EVENT_COUNTERS */
   8488 
    8489 		/*
    8490 		 * The documentation for the 82574 and newer says the status field
    8491 		 * has neither an EC (Excessive Collision) bit nor an LC (Late
    8492 		 * Collision) bit (both reserved). Refer to the "PCIe GbE Controller
    8493 		 * Open Source Software Developer's Manual", 82574 datasheet and newer.
    8494 		 *
    8495 		 * XXX I saw the LC bit set on an I218 even though the media
    8496 		 * was full duplex, so the bit might have some other meaning
    8497 		 * ... (I have no documentation).
    8498 		 */
   8499 
   8500 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8501 		    && ((sc->sc_type < WM_T_82574)
   8502 			|| (sc->sc_type == WM_T_80003))) {
   8503 			ifp->if_oerrors++;
   8504 			if (status & WTX_ST_LC)
   8505 				log(LOG_WARNING, "%s: late collision\n",
   8506 				    device_xname(sc->sc_dev));
   8507 			else if (status & WTX_ST_EC) {
   8508 				ifp->if_collisions +=
   8509 				    TX_COLLISION_THRESHOLD + 1;
   8510 				log(LOG_WARNING, "%s: excessive collisions\n",
   8511 				    device_xname(sc->sc_dev));
   8512 			}
   8513 		} else
   8514 			ifp->if_opackets++;
   8515 
   8516 		txq->txq_packets++;
   8517 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8518 
   8519 		txq->txq_free += txs->txs_ndesc;
   8520 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8521 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8522 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8523 		m_freem(txs->txs_mbuf);
   8524 		txs->txs_mbuf = NULL;
   8525 	}
   8526 
   8527 	/* Update the dirty transmit buffer pointer. */
   8528 	txq->txq_sdirty = i;
   8529 	DPRINTF(WM_DEBUG_TX,
   8530 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8531 
   8532 	if (count != 0)
   8533 		rnd_add_uint32(&sc->rnd_source, count);
   8534 
   8535 	/*
   8536 	 * If there are no more pending transmissions, cancel the watchdog
   8537 	 * timer.
   8538 	 */
   8539 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8540 		txq->txq_sending = false;
   8541 
   8542 	return more;
   8543 }
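
/*
 * Illustrative sketch (not part of the driver): reclaiming completed
 * jobs by polling a descriptor-done (DD) flag that the device writes
 * back, as wm_txeof() does with WTX_ST_DD above.  The names below are
 * hypothetical.
 */
#if 0
struct ex_desc {
	volatile unsigned char status;	/* device sets EX_ST_DD when done */
};
#define EX_ST_DD	0x01

/* Returns the number of descriptors reclaimed, scanning at most limit. */
static int
ex_reap(struct ex_desc *ring, int first, int pending, int size, int limit)
{
	int i, n = 0;

	for (i = first; n < pending && n < limit; i = (i + 1) % size, n++) {
		if ((ring[i].status & EX_ST_DD) == 0)
			break;	/* the device hasn't finished this one */
		/* free the mbuf and DMA map for ring[i] here */
	}
	return n;
}
#endif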
   8544 
   8545 static inline uint32_t
   8546 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8547 {
   8548 	struct wm_softc *sc = rxq->rxq_sc;
   8549 
   8550 	if (sc->sc_type == WM_T_82574)
   8551 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8552 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8553 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8554 	else
   8555 		return rxq->rxq_descs[idx].wrx_status;
   8556 }
   8557 
   8558 static inline uint32_t
   8559 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8560 {
   8561 	struct wm_softc *sc = rxq->rxq_sc;
   8562 
   8563 	if (sc->sc_type == WM_T_82574)
   8564 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8565 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8566 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8567 	else
   8568 		return rxq->rxq_descs[idx].wrx_errors;
   8569 }
   8570 
   8571 static inline uint16_t
   8572 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8573 {
   8574 	struct wm_softc *sc = rxq->rxq_sc;
   8575 
   8576 	if (sc->sc_type == WM_T_82574)
   8577 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8578 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8579 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8580 	else
   8581 		return rxq->rxq_descs[idx].wrx_special;
   8582 }
   8583 
   8584 static inline int
   8585 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8586 {
   8587 	struct wm_softc *sc = rxq->rxq_sc;
   8588 
   8589 	if (sc->sc_type == WM_T_82574)
   8590 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8591 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8592 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8593 	else
   8594 		return rxq->rxq_descs[idx].wrx_len;
   8595 }
   8596 
   8597 #ifdef WM_DEBUG
   8598 static inline uint32_t
   8599 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8600 {
   8601 	struct wm_softc *sc = rxq->rxq_sc;
   8602 
   8603 	if (sc->sc_type == WM_T_82574)
   8604 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8605 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8606 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8607 	else
   8608 		return 0;
   8609 }
   8610 
   8611 static inline uint8_t
   8612 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8613 {
   8614 	struct wm_softc *sc = rxq->rxq_sc;
   8615 
   8616 	if (sc->sc_type == WM_T_82574)
   8617 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8618 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8619 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8620 	else
   8621 		return 0;
   8622 }
   8623 #endif /* WM_DEBUG */
   8624 
   8625 static inline bool
   8626 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8627     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8628 {
   8629 
   8630 	if (sc->sc_type == WM_T_82574)
   8631 		return (status & ext_bit) != 0;
   8632 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8633 		return (status & nq_bit) != 0;
   8634 	else
   8635 		return (status & legacy_bit) != 0;
   8636 }
   8637 
   8638 static inline bool
   8639 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8640     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8641 {
   8642 
   8643 	if (sc->sc_type == WM_T_82574)
   8644 		return (error & ext_bit) != 0;
   8645 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8646 		return (error & nq_bit) != 0;
   8647 	else
   8648 		return (error & legacy_bit) != 0;
   8649 }
   8650 
   8651 static inline bool
   8652 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8653 {
   8654 
   8655 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8656 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8657 		return true;
   8658 	else
   8659 		return false;
   8660 }
   8661 
   8662 static inline bool
   8663 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8664 {
   8665 	struct wm_softc *sc = rxq->rxq_sc;
   8666 
    8667 	/* XXX Is there a missing error bit for NEWQUEUE? */
   8668 	if (wm_rxdesc_is_set_error(sc, errors,
   8669 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8670 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8671 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8672 		NQRXC_ERROR_RXE)) {
   8673 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8674 		    EXTRXC_ERROR_SE, 0))
   8675 			log(LOG_WARNING, "%s: symbol error\n",
   8676 			    device_xname(sc->sc_dev));
   8677 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8678 		    EXTRXC_ERROR_SEQ, 0))
   8679 			log(LOG_WARNING, "%s: receive sequence error\n",
   8680 			    device_xname(sc->sc_dev));
   8681 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8682 		    EXTRXC_ERROR_CE, 0))
   8683 			log(LOG_WARNING, "%s: CRC error\n",
   8684 			    device_xname(sc->sc_dev));
   8685 		return true;
   8686 	}
   8687 
   8688 	return false;
   8689 }
   8690 
   8691 static inline bool
   8692 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8693 {
   8694 	struct wm_softc *sc = rxq->rxq_sc;
   8695 
   8696 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8697 		NQRXC_STATUS_DD)) {
   8698 		/* We have processed all of the receive descriptors. */
   8699 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8700 		return false;
   8701 	}
   8702 
   8703 	return true;
   8704 }
   8705 
   8706 static inline bool
   8707 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8708     uint16_t vlantag, struct mbuf *m)
   8709 {
   8710 
   8711 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8712 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8713 		vlan_set_tag(m, le16toh(vlantag));
   8714 	}
   8715 
   8716 	return true;
   8717 }
   8718 
   8719 static inline void
   8720 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8721     uint32_t errors, struct mbuf *m)
   8722 {
   8723 	struct wm_softc *sc = rxq->rxq_sc;
   8724 
   8725 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8726 		if (wm_rxdesc_is_set_status(sc, status,
   8727 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8728 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8729 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8730 			if (wm_rxdesc_is_set_error(sc, errors,
   8731 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8732 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8733 		}
   8734 		if (wm_rxdesc_is_set_status(sc, status,
   8735 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8736 			/*
   8737 			 * Note: we don't know if this was TCP or UDP,
   8738 			 * so we just set both bits, and expect the
   8739 			 * upper layers to deal.
   8740 			 */
   8741 			WM_Q_EVCNT_INCR(rxq, tusum);
   8742 			m->m_pkthdr.csum_flags |=
   8743 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8744 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8745 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8746 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8747 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8748 		}
   8749 	}
   8750 }
   8751 
   8752 /*
   8753  * wm_rxeof:
   8754  *
   8755  *	Helper; handle receive interrupts.
   8756  */
   8757 static bool
   8758 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8759 {
   8760 	struct wm_softc *sc = rxq->rxq_sc;
   8761 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8762 	struct wm_rxsoft *rxs;
   8763 	struct mbuf *m;
   8764 	int i, len;
   8765 	int count = 0;
   8766 	uint32_t status, errors;
   8767 	uint16_t vlantag;
   8768 	bool more = false;
   8769 
   8770 	KASSERT(mutex_owned(rxq->rxq_lock));
   8771 
   8772 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8773 		if (limit-- == 0) {
   8774 			rxq->rxq_ptr = i;
   8775 			more = true;
   8776 			DPRINTF(WM_DEBUG_RX,
   8777 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8778 				device_xname(sc->sc_dev), i));
   8779 			break;
   8780 		}
   8781 
   8782 		rxs = &rxq->rxq_soft[i];
   8783 
   8784 		DPRINTF(WM_DEBUG_RX,
   8785 		    ("%s: RX: checking descriptor %d\n",
   8786 			device_xname(sc->sc_dev), i));
   8787 		wm_cdrxsync(rxq, i,
   8788 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8789 
   8790 		status = wm_rxdesc_get_status(rxq, i);
   8791 		errors = wm_rxdesc_get_errors(rxq, i);
   8792 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8793 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8794 #ifdef WM_DEBUG
   8795 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8796 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8797 #endif
   8798 
   8799 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8800 			/*
    8801 			 * Update the receive pointer while holding
    8802 			 * rxq_lock, consistent with the counters.
    8803 			 */
   8804 			rxq->rxq_ptr = i;
   8805 			break;
   8806 		}
   8807 
   8808 		count++;
   8809 		if (__predict_false(rxq->rxq_discard)) {
   8810 			DPRINTF(WM_DEBUG_RX,
   8811 			    ("%s: RX: discarding contents of descriptor %d\n",
   8812 				device_xname(sc->sc_dev), i));
   8813 			wm_init_rxdesc(rxq, i);
   8814 			if (wm_rxdesc_is_eop(rxq, status)) {
   8815 				/* Reset our state. */
   8816 				DPRINTF(WM_DEBUG_RX,
   8817 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8818 					device_xname(sc->sc_dev)));
   8819 				rxq->rxq_discard = 0;
   8820 			}
   8821 			continue;
   8822 		}
   8823 
   8824 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8825 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8826 
   8827 		m = rxs->rxs_mbuf;
   8828 
   8829 		/*
   8830 		 * Add a new receive buffer to the ring, unless of
   8831 		 * course the length is zero. Treat the latter as a
   8832 		 * failed mapping.
   8833 		 */
   8834 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8835 			/*
   8836 			 * Failed, throw away what we've done so
   8837 			 * far, and discard the rest of the packet.
   8838 			 */
   8839 			ifp->if_ierrors++;
   8840 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8841 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8842 			wm_init_rxdesc(rxq, i);
   8843 			if (!wm_rxdesc_is_eop(rxq, status))
   8844 				rxq->rxq_discard = 1;
   8845 			if (rxq->rxq_head != NULL)
   8846 				m_freem(rxq->rxq_head);
   8847 			WM_RXCHAIN_RESET(rxq);
   8848 			DPRINTF(WM_DEBUG_RX,
   8849 			    ("%s: RX: Rx buffer allocation failed, "
   8850 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8851 				rxq->rxq_discard ? " (discard)" : ""));
   8852 			continue;
   8853 		}
   8854 
   8855 		m->m_len = len;
   8856 		rxq->rxq_len += len;
   8857 		DPRINTF(WM_DEBUG_RX,
   8858 		    ("%s: RX: buffer at %p len %d\n",
   8859 			device_xname(sc->sc_dev), m->m_data, len));
   8860 
   8861 		/* If this is not the end of the packet, keep looking. */
   8862 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8863 			WM_RXCHAIN_LINK(rxq, m);
   8864 			DPRINTF(WM_DEBUG_RX,
   8865 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8866 				device_xname(sc->sc_dev), rxq->rxq_len));
   8867 			continue;
   8868 		}
   8869 
    8870 		/*
    8871 		 * Okay, we have the entire packet now. The chip is
    8872 		 * configured to include the FCS except on the I350, I354
    8873 		 * and I21[01] (not all chips can be configured to strip
    8874 		 * it), so we need to trim it. We may need to adjust the
    8875 		 * length of the previous mbuf in the chain if the current
    8876 		 * mbuf is too short (see the sketch after this function).
    8877 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    8878 		 * is always set on the I350, so we don't trim the FCS there.
    8879 		 */
   8880 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8881 		    && (sc->sc_type != WM_T_I210)
   8882 		    && (sc->sc_type != WM_T_I211)) {
   8883 			if (m->m_len < ETHER_CRC_LEN) {
   8884 				rxq->rxq_tail->m_len
   8885 				    -= (ETHER_CRC_LEN - m->m_len);
   8886 				m->m_len = 0;
   8887 			} else
   8888 				m->m_len -= ETHER_CRC_LEN;
   8889 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8890 		} else
   8891 			len = rxq->rxq_len;
   8892 
   8893 		WM_RXCHAIN_LINK(rxq, m);
   8894 
   8895 		*rxq->rxq_tailp = NULL;
   8896 		m = rxq->rxq_head;
   8897 
   8898 		WM_RXCHAIN_RESET(rxq);
   8899 
   8900 		DPRINTF(WM_DEBUG_RX,
   8901 		    ("%s: RX: have entire packet, len -> %d\n",
   8902 			device_xname(sc->sc_dev), len));
   8903 
   8904 		/* If an error occurred, update stats and drop the packet. */
   8905 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8906 			m_freem(m);
   8907 			continue;
   8908 		}
   8909 
   8910 		/* No errors.  Receive the packet. */
   8911 		m_set_rcvif(m, ifp);
   8912 		m->m_pkthdr.len = len;
    8913 		/*
    8914 		 * TODO
    8915 		 * We should save the rsshash and rsstype in this mbuf.
    8916 		 */
   8917 		DPRINTF(WM_DEBUG_RX,
   8918 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8919 			device_xname(sc->sc_dev), rsstype, rsshash));
   8920 
   8921 		/*
   8922 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8923 		 * for us.  Associate the tag with the packet.
   8924 		 */
   8925 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8926 			continue;
   8927 
   8928 		/* Set up checksum info for this packet. */
   8929 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8930 		/*
    8931 		 * Update the receive pointer while holding rxq_lock,
    8932 		 * consistent with the counters.
    8933 		 */
   8934 		rxq->rxq_ptr = i;
   8935 		rxq->rxq_packets++;
   8936 		rxq->rxq_bytes += len;
   8937 		mutex_exit(rxq->rxq_lock);
   8938 
   8939 		/* Pass it on. */
   8940 		if_percpuq_enqueue(sc->sc_ipq, m);
   8941 
   8942 		mutex_enter(rxq->rxq_lock);
   8943 
   8944 		if (rxq->rxq_stopping)
   8945 			break;
   8946 	}
   8947 
   8948 	if (count != 0)
   8949 		rnd_add_uint32(&sc->rnd_source, count);
   8950 
   8951 	DPRINTF(WM_DEBUG_RX,
   8952 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8953 
   8954 	return more;
   8955 }
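
/*
 * Illustrative sketch (not part of the driver): trimming a trailing
 * FCS that may straddle the last two buffers of a chain, as done with
 * ETHER_CRC_LEN in wm_rxeof() above.  The names are hypothetical;
 * prev_len and last_len are the lengths of the final two buffers.
 */
#if 0
static void
ex_trim_trailer(int *prev_len, int *last_len, int crc_len)
{

	if (*last_len < crc_len) {
		/* The CRC spills over into the previous buffer. */
		*prev_len -= crc_len - *last_len;
		*last_len = 0;
	} else
		*last_len -= crc_len;
}
#endif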
   8956 
   8957 /*
   8958  * wm_linkintr_gmii:
   8959  *
   8960  *	Helper; handle link interrupts for GMII.
   8961  */
   8962 static void
   8963 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8964 {
   8965 	device_t dev = sc->sc_dev;
   8966 	uint32_t status, reg;
   8967 	bool link;
   8968 	int rv;
   8969 
   8970 	KASSERT(WM_CORE_LOCKED(sc));
   8971 
   8972 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8973 		__func__));
   8974 
   8975 	if ((icr & ICR_LSC) == 0) {
   8976 		if (icr & ICR_RXSEQ)
   8977 			DPRINTF(WM_DEBUG_LINK,
   8978 			    ("%s: LINK Receive sequence error\n",
   8979 				device_xname(dev)));
   8980 		return;
   8981 	}
   8982 
   8983 	/* Link status changed */
   8984 	status = CSR_READ(sc, WMREG_STATUS);
   8985 	link = status & STATUS_LU;
   8986 	if (link)
   8987 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8988 			device_xname(dev),
   8989 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8990 	else
   8991 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8992 			device_xname(dev)));
   8993 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8994 		wm_gig_downshift_workaround_ich8lan(sc);
   8995 
   8996 	if ((sc->sc_type == WM_T_ICH8)
   8997 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8998 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8999 	}
   9000 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9001 		device_xname(dev)));
   9002 	mii_pollstat(&sc->sc_mii);
   9003 	if (sc->sc_type == WM_T_82543) {
   9004 		int miistatus, active;
   9005 
    9006 		/*
    9007 		 * With the 82543, we need to force the speed and
    9008 		 * duplex of the MAC to match the PHY's speed and
    9009 		 * duplex configuration.
    9010 		 */
   9011 		miistatus = sc->sc_mii.mii_media_status;
   9012 
   9013 		if (miistatus & IFM_ACTIVE) {
   9014 			active = sc->sc_mii.mii_media_active;
   9015 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9016 			switch (IFM_SUBTYPE(active)) {
   9017 			case IFM_10_T:
   9018 				sc->sc_ctrl |= CTRL_SPEED_10;
   9019 				break;
   9020 			case IFM_100_TX:
   9021 				sc->sc_ctrl |= CTRL_SPEED_100;
   9022 				break;
   9023 			case IFM_1000_T:
   9024 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9025 				break;
   9026 			default:
    9027 				/*
    9028 				 * Fiber?
    9029 				 * Should not get here.
    9030 				 */
   9031 				printf("unknown media (%x)\n", active);
   9032 				break;
   9033 			}
   9034 			if (active & IFM_FDX)
   9035 				sc->sc_ctrl |= CTRL_FD;
   9036 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9037 		}
   9038 	} else if (sc->sc_type == WM_T_PCH) {
   9039 		wm_k1_gig_workaround_hv(sc,
   9040 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9041 	}
   9042 
    9043 	/*
    9044 	 * When connected at 10Mbps half-duplex, some parts are excessively
    9045 	 * aggressive, resulting in many collisions. To avoid this, increase
    9046 	 * the IPG and reduce the Rx latency in the PHY.
    9047 	 */
   9048 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9049 	    && link) {
   9050 		uint32_t tipg_reg;
   9051 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9052 		bool fdx;
   9053 		uint16_t emi_addr, emi_val;
   9054 
   9055 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9056 		tipg_reg &= ~TIPG_IPGT_MASK;
   9057 		fdx = status & STATUS_FD;
   9058 
   9059 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9060 			tipg_reg |= 0xff;
   9061 			/* Reduce Rx latency in analog PHY */
   9062 			emi_val = 0;
   9063 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9064 		    fdx && speed != STATUS_SPEED_1000) {
   9065 			tipg_reg |= 0xc;
   9066 			emi_val = 1;
   9067 		} else {
   9068 			/* Roll back the default values */
   9069 			tipg_reg |= 0x08;
   9070 			emi_val = 1;
   9071 		}
   9072 
   9073 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9074 
   9075 		rv = sc->phy.acquire(sc);
   9076 		if (rv)
   9077 			return;
   9078 
   9079 		if (sc->sc_type == WM_T_PCH2)
   9080 			emi_addr = I82579_RX_CONFIG;
   9081 		else
   9082 			emi_addr = I217_RX_CONFIG;
   9083 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9084 
   9085 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9086 			uint16_t phy_reg;
   9087 
   9088 			sc->phy.readreg_locked(dev, 2,
   9089 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9090 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9091 			if (speed == STATUS_SPEED_100
   9092 			    || speed == STATUS_SPEED_10)
   9093 				phy_reg |= 0x3e8;
   9094 			else
   9095 				phy_reg |= 0xfa;
   9096 			sc->phy.writereg_locked(dev, 2,
   9097 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9098 
   9099 			if (speed == STATUS_SPEED_1000) {
   9100 				sc->phy.readreg_locked(dev, 2,
   9101 				    HV_PM_CTRL, &phy_reg);
   9102 
   9103 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9104 
   9105 				sc->phy.writereg_locked(dev, 2,
   9106 				    HV_PM_CTRL, phy_reg);
   9107 			}
   9108 		}
   9109 		sc->phy.release(sc);
   9110 
   9111 		if (rv)
   9112 			return;
   9113 
   9114 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9115 			uint16_t data, ptr_gap;
   9116 
   9117 			if (speed == STATUS_SPEED_1000) {
   9118 				rv = sc->phy.acquire(sc);
   9119 				if (rv)
   9120 					return;
   9121 
   9122 				rv = sc->phy.readreg_locked(dev, 2,
   9123 				    I219_UNKNOWN1, &data);
   9124 				if (rv) {
   9125 					sc->phy.release(sc);
   9126 					return;
   9127 				}
   9128 
   9129 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9130 				if (ptr_gap < 0x18) {
   9131 					data &= ~(0x3ff << 2);
   9132 					data |= (0x18 << 2);
   9133 					rv = sc->phy.writereg_locked(dev,
   9134 					    2, I219_UNKNOWN1, data);
   9135 				}
   9136 				sc->phy.release(sc);
   9137 				if (rv)
   9138 					return;
   9139 			} else {
   9140 				rv = sc->phy.acquire(sc);
   9141 				if (rv)
   9142 					return;
   9143 
   9144 				rv = sc->phy.writereg_locked(dev, 2,
   9145 				    I219_UNKNOWN1, 0xc023);
   9146 				sc->phy.release(sc);
   9147 				if (rv)
   9148 					return;
   9149 
   9150 			}
   9151 		}
   9152 	}
   9153 
    9154 	/*
    9155 	 * I217 packet loss issue:
    9156 	 * ensure that the FEXTNVM4 beacon duration is set correctly
    9157 	 * on power-up.
    9158 	 * Set the beacon duration for the I217 to 8 usec.
    9159 	 */
   9160 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9161 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9162 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9163 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9164 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9165 	}
   9166 
    9167 	/* Work around the I218 hang issue */
   9168 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9169 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9170 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9171 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9172 		wm_k1_workaround_lpt_lp(sc, link);
   9173 
   9174 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9175 		/*
   9176 		 * Set platform power management values for Latency
   9177 		 * Tolerance Reporting (LTR)
   9178 		 */
   9179 		wm_platform_pm_pch_lpt(sc,
   9180 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9181 	}
   9182 
   9183 	/* Clear link partner's EEE ability */
   9184 	sc->eee_lp_ability = 0;
   9185 
   9186 	/* FEXTNVM6 K1-off workaround */
   9187 	if (sc->sc_type == WM_T_PCH_SPT) {
   9188 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9189 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9190 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9191 		else
   9192 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9193 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9194 	}
   9195 
   9196 	if (!link)
   9197 		return;
   9198 
   9199 	switch (sc->sc_type) {
   9200 	case WM_T_PCH2:
   9201 		wm_k1_workaround_lv(sc);
   9202 		/* FALLTHROUGH */
   9203 	case WM_T_PCH:
   9204 		if (sc->sc_phytype == WMPHY_82578)
   9205 			wm_link_stall_workaround_hv(sc);
   9206 		break;
   9207 	default:
   9208 		break;
   9209 	}
   9210 
   9211 	/* Enable/Disable EEE after link up */
   9212 	if (sc->sc_phytype > WMPHY_82579)
   9213 		wm_set_eee_pchlan(sc);
   9214 }
   9215 
   9216 /*
   9217  * wm_linkintr_tbi:
   9218  *
   9219  *	Helper; handle link interrupts for TBI mode.
   9220  */
   9221 static void
   9222 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9223 {
   9224 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9225 	uint32_t status;
   9226 
   9227 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9228 		__func__));
   9229 
   9230 	status = CSR_READ(sc, WMREG_STATUS);
   9231 	if (icr & ICR_LSC) {
   9232 		wm_check_for_link(sc);
   9233 		if (status & STATUS_LU) {
   9234 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9235 				device_xname(sc->sc_dev),
   9236 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9237 			/*
   9238 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9239 			 * so we should update sc->sc_ctrl
   9240 			 */
   9241 
   9242 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9243 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9244 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9245 			if (status & STATUS_FD)
   9246 				sc->sc_tctl |=
   9247 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9248 			else
   9249 				sc->sc_tctl |=
   9250 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9251 			if (sc->sc_ctrl & CTRL_TFCE)
   9252 				sc->sc_fcrtl |= FCRTL_XONE;
   9253 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9254 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9255 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9256 			sc->sc_tbi_linkup = 1;
   9257 			if_link_state_change(ifp, LINK_STATE_UP);
   9258 		} else {
   9259 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9260 				device_xname(sc->sc_dev)));
   9261 			sc->sc_tbi_linkup = 0;
   9262 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9263 		}
   9264 		/* Update LED */
   9265 		wm_tbi_serdes_set_linkled(sc);
   9266 	} else if (icr & ICR_RXSEQ)
   9267 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9268 			device_xname(sc->sc_dev)));
   9269 }
   9270 
   9271 /*
   9272  * wm_linkintr_serdes:
   9273  *
    9274  *	Helper; handle link interrupts for SERDES mode.
   9275  */
   9276 static void
   9277 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9278 {
   9279 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9280 	struct mii_data *mii = &sc->sc_mii;
   9281 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9282 	uint32_t pcs_adv, pcs_lpab, reg;
   9283 
   9284 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9285 		__func__));
   9286 
   9287 	if (icr & ICR_LSC) {
   9288 		/* Check PCS */
   9289 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9290 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9291 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9292 				device_xname(sc->sc_dev)));
   9293 			mii->mii_media_status |= IFM_ACTIVE;
   9294 			sc->sc_tbi_linkup = 1;
   9295 			if_link_state_change(ifp, LINK_STATE_UP);
   9296 		} else {
   9297 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9298 				device_xname(sc->sc_dev)));
   9299 			mii->mii_media_status |= IFM_NONE;
   9300 			sc->sc_tbi_linkup = 0;
   9301 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9302 			wm_tbi_serdes_set_linkled(sc);
   9303 			return;
   9304 		}
   9305 		mii->mii_media_active |= IFM_1000_SX;
   9306 		if ((reg & PCS_LSTS_FDX) != 0)
   9307 			mii->mii_media_active |= IFM_FDX;
   9308 		else
   9309 			mii->mii_media_active |= IFM_HDX;
   9310 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9311 			/* Check flow */
   9312 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9313 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9314 				DPRINTF(WM_DEBUG_LINK,
   9315 				    ("XXX LINKOK but not ACOMP\n"));
   9316 				return;
   9317 			}
   9318 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9319 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9320 			DPRINTF(WM_DEBUG_LINK,
   9321 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9322 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9323 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9324 				mii->mii_media_active |= IFM_FLOW
   9325 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9326 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9327 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9328 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9329 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9330 				mii->mii_media_active |= IFM_FLOW
   9331 				    | IFM_ETH_TXPAUSE;
   9332 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9333 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9334 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9335 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9336 				mii->mii_media_active |= IFM_FLOW
   9337 				    | IFM_ETH_RXPAUSE;
   9338 		}
   9339 		/* Update LED */
   9340 		wm_tbi_serdes_set_linkled(sc);
   9341 	} else
   9342 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9343 		    device_xname(sc->sc_dev)));
   9344 }
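
/*
 * Illustrative sketch (not part of the driver): the IEEE 802.3
 * Annex 28B pause resolution implemented above with TXCW_SYM_PAUSE and
 * TXCW_ASYM_PAUSE.  Symmetric pause on both sides enables flow control
 * in both directions; the asymmetric combinations enable exactly one.
 * The names below are hypothetical.
 */
#if 0
#include <stdbool.h>

struct ex_pause {
	bool tx;	/* we may send PAUSE frames */
	bool rx;	/* we honor received PAUSE frames */
};

static struct ex_pause
ex_resolve_pause(bool loc_sym, bool loc_asym, bool lp_sym, bool lp_asym)
{
	struct ex_pause r = { false, false };

	if (loc_sym && lp_sym)
		r.tx = r.rx = true;
	else if (!loc_sym && loc_asym && lp_sym && lp_asym)
		r.tx = true;	/* we pause the partner, not vice versa */
	else if (loc_sym && loc_asym && !lp_sym && lp_asym)
		r.rx = true;	/* the partner pauses us */
	return r;
}
#endif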
   9345 
   9346 /*
   9347  * wm_linkintr:
   9348  *
   9349  *	Helper; handle link interrupts.
   9350  */
   9351 static void
   9352 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9353 {
   9354 
   9355 	KASSERT(WM_CORE_LOCKED(sc));
   9356 
   9357 	if (sc->sc_flags & WM_F_HAS_MII)
   9358 		wm_linkintr_gmii(sc, icr);
   9359 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9360 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9361 		wm_linkintr_serdes(sc, icr);
   9362 	else
   9363 		wm_linkintr_tbi(sc, icr);
   9364 }
   9365 
   9366 /*
   9367  * wm_intr_legacy:
   9368  *
   9369  *	Interrupt service routine for INTx and MSI.
   9370  */
   9371 static int
   9372 wm_intr_legacy(void *arg)
   9373 {
   9374 	struct wm_softc *sc = arg;
   9375 	struct wm_queue *wmq = &sc->sc_queue[0];
   9376 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9377 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9378 	uint32_t icr, rndval = 0;
   9379 	int handled = 0;
   9380 
   9381 	while (1 /* CONSTCOND */) {
   9382 		icr = CSR_READ(sc, WMREG_ICR);
   9383 		if ((icr & sc->sc_icr) == 0)
   9384 			break;
   9385 		if (handled == 0)
   9386 			DPRINTF(WM_DEBUG_TX,
   9387 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9388 		if (rndval == 0)
   9389 			rndval = icr;
   9390 
   9391 		mutex_enter(rxq->rxq_lock);
   9392 
   9393 		if (rxq->rxq_stopping) {
   9394 			mutex_exit(rxq->rxq_lock);
   9395 			break;
   9396 		}
   9397 
   9398 		handled = 1;
   9399 
   9400 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9401 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9402 			DPRINTF(WM_DEBUG_RX,
   9403 			    ("%s: RX: got Rx intr 0x%08x\n",
   9404 				device_xname(sc->sc_dev),
   9405 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9406 			WM_Q_EVCNT_INCR(rxq, intr);
   9407 		}
   9408 #endif
   9409 		/*
   9410 		 * wm_rxeof() does *not* call upper layer functions directly,
    9411 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9412 		 * So, we can call wm_rxeof() in interrupt context.
   9413 		 */
   9414 		wm_rxeof(rxq, UINT_MAX);
   9415 
   9416 		mutex_exit(rxq->rxq_lock);
   9417 		mutex_enter(txq->txq_lock);
   9418 
   9419 		if (txq->txq_stopping) {
   9420 			mutex_exit(txq->txq_lock);
   9421 			break;
   9422 		}
   9423 
   9424 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9425 		if (icr & ICR_TXDW) {
   9426 			DPRINTF(WM_DEBUG_TX,
   9427 			    ("%s: TX: got TXDW interrupt\n",
   9428 				device_xname(sc->sc_dev)));
   9429 			WM_Q_EVCNT_INCR(txq, txdw);
   9430 		}
   9431 #endif
   9432 		wm_txeof(txq, UINT_MAX);
   9433 
   9434 		mutex_exit(txq->txq_lock);
   9435 		WM_CORE_LOCK(sc);
   9436 
   9437 		if (sc->sc_core_stopping) {
   9438 			WM_CORE_UNLOCK(sc);
   9439 			break;
   9440 		}
   9441 
   9442 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9443 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9444 			wm_linkintr(sc, icr);
   9445 		}
   9446 
   9447 		WM_CORE_UNLOCK(sc);
   9448 
   9449 		if (icr & ICR_RXO) {
   9450 #if defined(WM_DEBUG)
   9451 			log(LOG_WARNING, "%s: Receive overrun\n",
   9452 			    device_xname(sc->sc_dev));
   9453 #endif /* defined(WM_DEBUG) */
   9454 		}
   9455 	}
   9456 
   9457 	rnd_add_uint32(&sc->rnd_source, rndval);
   9458 
   9459 	if (handled) {
   9460 		/* Try to get more packets going. */
   9461 		softint_schedule(wmq->wmq_si);
   9462 	}
   9463 
   9464 	return handled;
   9465 }
   9466 
   9467 static inline void
   9468 wm_txrxintr_disable(struct wm_queue *wmq)
   9469 {
   9470 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9471 
   9472 	if (sc->sc_type == WM_T_82574)
   9473 		CSR_WRITE(sc, WMREG_IMC,
   9474 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9475 	else if (sc->sc_type == WM_T_82575)
   9476 		CSR_WRITE(sc, WMREG_EIMC,
   9477 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9478 	else
   9479 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9480 }
   9481 
   9482 static inline void
   9483 wm_txrxintr_enable(struct wm_queue *wmq)
   9484 {
   9485 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9486 
   9487 	wm_itrs_calculate(sc, wmq);
   9488 
    9489 	/*
    9490 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9491 	 * here. There is no need to care whether RXQ(0) or RXQ(1) enables
    9492 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9493 	 * while its wm_handle_queue(wmq) is running.
    9494 	 */
   9495 	if (sc->sc_type == WM_T_82574)
   9496 		CSR_WRITE(sc, WMREG_IMS,
   9497 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9498 	else if (sc->sc_type == WM_T_82575)
   9499 		CSR_WRITE(sc, WMREG_EIMS,
   9500 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9501 	else
   9502 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9503 }
   9504 
   9505 static int
   9506 wm_txrxintr_msix(void *arg)
   9507 {
   9508 	struct wm_queue *wmq = arg;
   9509 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9510 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9511 	struct wm_softc *sc = txq->txq_sc;
   9512 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9513 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9514 	bool txmore;
   9515 	bool rxmore;
   9516 
   9517 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9518 
   9519 	DPRINTF(WM_DEBUG_TX,
   9520 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9521 
   9522 	wm_txrxintr_disable(wmq);
   9523 
   9524 	mutex_enter(txq->txq_lock);
   9525 
   9526 	if (txq->txq_stopping) {
   9527 		mutex_exit(txq->txq_lock);
   9528 		return 0;
   9529 	}
   9530 
   9531 	WM_Q_EVCNT_INCR(txq, txdw);
   9532 	txmore = wm_txeof(txq, txlimit);
	/* wm_deferred_start_locked() is called from wm_handle_queue(). */
   9534 	mutex_exit(txq->txq_lock);
   9535 
   9536 	DPRINTF(WM_DEBUG_RX,
   9537 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9538 	mutex_enter(rxq->rxq_lock);
   9539 
   9540 	if (rxq->rxq_stopping) {
   9541 		mutex_exit(rxq->rxq_lock);
   9542 		return 0;
   9543 	}
   9544 
   9545 	WM_Q_EVCNT_INCR(rxq, intr);
   9546 	rxmore = wm_rxeof(rxq, rxlimit);
   9547 	mutex_exit(rxq->rxq_lock);
   9548 
   9549 	wm_itrs_writereg(sc, wmq);
   9550 
   9551 	if (txmore || rxmore)
   9552 		softint_schedule(wmq->wmq_si);
   9553 	else
   9554 		wm_txrxintr_enable(wmq);
   9555 
   9556 	return 1;
   9557 }
   9558 
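/*
 * wm_handle_queue:
 *
 *	Softint handler for a Tx/Rx queue pair. Continues the work begun
 *	by wm_txrxintr_msix() using the non-interrupt process limits and
 *	re-enables the queue's interrupts once no more work is pending.
 */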
   9559 static void
   9560 wm_handle_queue(void *arg)
   9561 {
   9562 	struct wm_queue *wmq = arg;
   9563 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9564 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9565 	struct wm_softc *sc = txq->txq_sc;
   9566 	u_int txlimit = sc->sc_tx_process_limit;
   9567 	u_int rxlimit = sc->sc_rx_process_limit;
   9568 	bool txmore;
   9569 	bool rxmore;
   9570 
   9571 	mutex_enter(txq->txq_lock);
   9572 	if (txq->txq_stopping) {
   9573 		mutex_exit(txq->txq_lock);
   9574 		return;
   9575 	}
   9576 	txmore = wm_txeof(txq, txlimit);
   9577 	wm_deferred_start_locked(txq);
   9578 	mutex_exit(txq->txq_lock);
   9579 
   9580 	mutex_enter(rxq->rxq_lock);
   9581 	if (rxq->rxq_stopping) {
   9582 		mutex_exit(rxq->rxq_lock);
   9583 		return;
   9584 	}
   9585 	WM_Q_EVCNT_INCR(rxq, defer);
   9586 	rxmore = wm_rxeof(rxq, rxlimit);
   9587 	mutex_exit(rxq->rxq_lock);
   9588 
   9589 	if (txmore || rxmore)
   9590 		softint_schedule(wmq->wmq_si);
   9591 	else
   9592 		wm_txrxintr_enable(wmq);
   9593 }
   9594 
   9595 /*
   9596  * wm_linkintr_msix:
   9597  *
   9598  *	Interrupt service routine for link status change for MSI-X.
   9599  */
   9600 static int
   9601 wm_linkintr_msix(void *arg)
   9602 {
   9603 	struct wm_softc *sc = arg;
   9604 	uint32_t reg;
   9605 	bool has_rxo;
   9606 
   9607 	DPRINTF(WM_DEBUG_LINK,
   9608 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9609 
   9610 	reg = CSR_READ(sc, WMREG_ICR);
   9611 	WM_CORE_LOCK(sc);
   9612 	if (sc->sc_core_stopping)
   9613 		goto out;
   9614 
   9615 	if ((reg & ICR_LSC) != 0) {
   9616 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9617 		wm_linkintr(sc, ICR_LSC);
   9618 	}
   9619 
	/*
	 * XXX 82574 MSI-X mode workaround
	 *
	 * In 82574 MSI-X mode, a receive overrun (RXO) interrupt is delivered
	 * on the ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor
	 * the ICR_RXQ(1) vector. So, generate ICR_RXQ(0) and ICR_RXQ(1)
	 * interrupts by writing WMREG_ICS to process received packets.
	 */
   9628 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9629 #if defined(WM_DEBUG)
   9630 		log(LOG_WARNING, "%s: Receive overrun\n",
   9631 		    device_xname(sc->sc_dev));
   9632 #endif /* defined(WM_DEBUG) */
   9633 
   9634 		has_rxo = true;
		/*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so poll ICR_OTHER the same way as the
		 * Tx/Rx interrupts. ICR_OTHER is re-enabled at the end of
		 * wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
		 * and ICR_RXQ(1) interrupts.
		 */
   9642 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9643 
   9644 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9645 	}
   9646 
   9649 out:
   9650 	WM_CORE_UNLOCK(sc);
   9651 
   9652 	if (sc->sc_type == WM_T_82574) {
   9653 		if (!has_rxo)
   9654 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9655 		else
   9656 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9657 	} else if (sc->sc_type == WM_T_82575)
   9658 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9659 	else
   9660 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9661 
   9662 	return 1;
   9663 }
   9664 
   9665 /*
   9666  * Media related.
   9667  * GMII, SGMII, TBI (and SERDES)
   9668  */
   9669 
   9670 /* Common */
   9671 
   9672 /*
   9673  * wm_tbi_serdes_set_linkled:
   9674  *
   9675  *	Update the link LED on TBI and SERDES devices.
   9676  */
   9677 static void
   9678 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9679 {
   9680 
   9681 	if (sc->sc_tbi_linkup)
   9682 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9683 	else
   9684 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9685 
	/* On 82540 and newer devices, the link LED is active low */
   9687 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9688 
   9689 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9690 }
   9691 
   9692 /* GMII related */
   9693 
   9694 /*
   9695  * wm_gmii_reset:
   9696  *
   9697  *	Reset the PHY.
   9698  */
   9699 static void
   9700 wm_gmii_reset(struct wm_softc *sc)
   9701 {
   9702 	uint32_t reg;
   9703 	int rv;
   9704 
   9705 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9706 		device_xname(sc->sc_dev), __func__));
   9707 
   9708 	rv = sc->phy.acquire(sc);
   9709 	if (rv != 0) {
   9710 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9711 		    __func__);
   9712 		return;
   9713 	}
   9714 
   9715 	switch (sc->sc_type) {
   9716 	case WM_T_82542_2_0:
   9717 	case WM_T_82542_2_1:
   9718 		/* null */
   9719 		break;
   9720 	case WM_T_82543:
		/*
		 * With the 82543, we must force the MAC's speed and duplex
		 * to match the PHY's speed and duplex configuration.
		 * In addition, we must toggle the PHY's hardware reset pin
		 * to take it out of reset.
		 */
   9727 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9728 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9729 
   9730 		/* The PHY reset pin is active-low. */
   9731 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9732 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9733 		    CTRL_EXT_SWDPIN(4));
   9734 		reg |= CTRL_EXT_SWDPIO(4);
   9735 
   9736 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9737 		CSR_WRITE_FLUSH(sc);
   9738 		delay(10*1000);
   9739 
   9740 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9741 		CSR_WRITE_FLUSH(sc);
   9742 		delay(150);
   9743 #if 0
   9744 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9745 #endif
   9746 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9747 		break;
   9748 	case WM_T_82544:	/* reset 10000us */
   9749 	case WM_T_82540:
   9750 	case WM_T_82545:
   9751 	case WM_T_82545_3:
   9752 	case WM_T_82546:
   9753 	case WM_T_82546_3:
   9754 	case WM_T_82541:
   9755 	case WM_T_82541_2:
   9756 	case WM_T_82547:
   9757 	case WM_T_82547_2:
   9758 	case WM_T_82571:	/* reset 100us */
   9759 	case WM_T_82572:
   9760 	case WM_T_82573:
   9761 	case WM_T_82574:
   9762 	case WM_T_82575:
   9763 	case WM_T_82576:
   9764 	case WM_T_82580:
   9765 	case WM_T_I350:
   9766 	case WM_T_I354:
   9767 	case WM_T_I210:
   9768 	case WM_T_I211:
   9769 	case WM_T_82583:
   9770 	case WM_T_80003:
   9771 		/* generic reset */
   9772 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9773 		CSR_WRITE_FLUSH(sc);
   9774 		delay(20000);
   9775 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9776 		CSR_WRITE_FLUSH(sc);
   9777 		delay(20000);
   9778 
   9779 		if ((sc->sc_type == WM_T_82541)
   9780 		    || (sc->sc_type == WM_T_82541_2)
   9781 		    || (sc->sc_type == WM_T_82547)
   9782 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   9784 			/* XXX add code to set LED after phy reset */
   9785 		}
   9786 		break;
   9787 	case WM_T_ICH8:
   9788 	case WM_T_ICH9:
   9789 	case WM_T_ICH10:
   9790 	case WM_T_PCH:
   9791 	case WM_T_PCH2:
   9792 	case WM_T_PCH_LPT:
   9793 	case WM_T_PCH_SPT:
   9794 	case WM_T_PCH_CNP:
   9795 		/* generic reset */
   9796 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9797 		CSR_WRITE_FLUSH(sc);
   9798 		delay(100);
   9799 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9800 		CSR_WRITE_FLUSH(sc);
   9801 		delay(150);
   9802 		break;
   9803 	default:
   9804 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9805 		    __func__);
   9806 		break;
   9807 	}
   9808 
   9809 	sc->phy.release(sc);
   9810 
   9811 	/* get_cfg_done */
   9812 	wm_get_cfg_done(sc);
   9813 
   9814 	/* extra setup */
   9815 	switch (sc->sc_type) {
   9816 	case WM_T_82542_2_0:
   9817 	case WM_T_82542_2_1:
   9818 	case WM_T_82543:
   9819 	case WM_T_82544:
   9820 	case WM_T_82540:
   9821 	case WM_T_82545:
   9822 	case WM_T_82545_3:
   9823 	case WM_T_82546:
   9824 	case WM_T_82546_3:
   9825 	case WM_T_82541_2:
   9826 	case WM_T_82547_2:
   9827 	case WM_T_82571:
   9828 	case WM_T_82572:
   9829 	case WM_T_82573:
   9830 	case WM_T_82574:
   9831 	case WM_T_82583:
   9832 	case WM_T_82575:
   9833 	case WM_T_82576:
   9834 	case WM_T_82580:
   9835 	case WM_T_I350:
   9836 	case WM_T_I354:
   9837 	case WM_T_I210:
   9838 	case WM_T_I211:
   9839 	case WM_T_80003:
   9840 		/* null */
   9841 		break;
   9842 	case WM_T_82541:
   9843 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   9845 		break;
   9846 	case WM_T_ICH8:
   9847 	case WM_T_ICH9:
   9848 	case WM_T_ICH10:
   9849 	case WM_T_PCH:
   9850 	case WM_T_PCH2:
   9851 	case WM_T_PCH_LPT:
   9852 	case WM_T_PCH_SPT:
   9853 	case WM_T_PCH_CNP:
   9854 		wm_phy_post_reset(sc);
   9855 		break;
   9856 	default:
   9857 		panic("%s: unknown type\n", __func__);
   9858 		break;
   9859 	}
   9860 }
   9861 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and selecting them requires the PCI ID or MAC type, which are
 * available without accessing any PHY register.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * the PCI ID or MAC type is checked. The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. The table of entries may still be incomplete, but the result
 * is more reliable than that of the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   9880 static void
   9881 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9882     uint16_t phy_model)
   9883 {
   9884 	device_t dev = sc->sc_dev;
   9885 	struct mii_data *mii = &sc->sc_mii;
   9886 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9887 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9888 	mii_readreg_t new_readreg;
   9889 	mii_writereg_t new_writereg;
   9890 
   9891 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9892 		device_xname(sc->sc_dev), __func__));
   9893 
   9894 	if (mii->mii_readreg == NULL) {
   9895 		/*
   9896 		 *  This is the first call of this function. For ICH and PCH
   9897 		 * variants, it's difficult to determine the PHY access method
   9898 		 * by sc_type, so use the PCI product ID for some devices.
   9899 		 */
   9900 
   9901 		switch (sc->sc_pcidevid) {
   9902 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9903 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9904 			/* 82577 */
   9905 			new_phytype = WMPHY_82577;
   9906 			break;
   9907 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9908 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9909 			/* 82578 */
   9910 			new_phytype = WMPHY_82578;
   9911 			break;
   9912 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9913 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9914 			/* 82579 */
   9915 			new_phytype = WMPHY_82579;
   9916 			break;
   9917 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9918 		case PCI_PRODUCT_INTEL_82801I_BM:
   9919 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9920 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9921 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9922 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9923 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9924 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9925 			/* ICH8, 9, 10 with 82567 */
   9926 			new_phytype = WMPHY_BM;
   9927 			break;
   9928 		default:
   9929 			break;
   9930 		}
   9931 	} else {
   9932 		/* It's not the first call. Use PHY OUI and model */
   9933 		switch (phy_oui) {
   9934 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9935 			switch (phy_model) {
   9936 			case 0x0004: /* XXX */
   9937 				new_phytype = WMPHY_82578;
   9938 				break;
   9939 			default:
   9940 				break;
   9941 			}
   9942 			break;
   9943 		case MII_OUI_xxMARVELL:
   9944 			switch (phy_model) {
   9945 			case MII_MODEL_xxMARVELL_I210:
   9946 				new_phytype = WMPHY_I210;
   9947 				break;
   9948 			case MII_MODEL_xxMARVELL_E1011:
   9949 			case MII_MODEL_xxMARVELL_E1000_3:
   9950 			case MII_MODEL_xxMARVELL_E1000_5:
   9951 			case MII_MODEL_xxMARVELL_E1112:
   9952 				new_phytype = WMPHY_M88;
   9953 				break;
   9954 			case MII_MODEL_xxMARVELL_E1149:
   9955 				new_phytype = WMPHY_BM;
   9956 				break;
   9957 			case MII_MODEL_xxMARVELL_E1111:
   9958 			case MII_MODEL_xxMARVELL_I347:
   9959 			case MII_MODEL_xxMARVELL_E1512:
   9960 			case MII_MODEL_xxMARVELL_E1340M:
   9961 			case MII_MODEL_xxMARVELL_E1543:
   9962 				new_phytype = WMPHY_M88;
   9963 				break;
   9964 			case MII_MODEL_xxMARVELL_I82563:
   9965 				new_phytype = WMPHY_GG82563;
   9966 				break;
   9967 			default:
   9968 				break;
   9969 			}
   9970 			break;
   9971 		case MII_OUI_INTEL:
   9972 			switch (phy_model) {
   9973 			case MII_MODEL_INTEL_I82577:
   9974 				new_phytype = WMPHY_82577;
   9975 				break;
   9976 			case MII_MODEL_INTEL_I82579:
   9977 				new_phytype = WMPHY_82579;
   9978 				break;
   9979 			case MII_MODEL_INTEL_I217:
   9980 				new_phytype = WMPHY_I217;
   9981 				break;
   9982 			case MII_MODEL_INTEL_I82580:
   9983 			case MII_MODEL_INTEL_I350:
   9984 				new_phytype = WMPHY_82580;
   9985 				break;
   9986 			default:
   9987 				break;
   9988 			}
   9989 			break;
   9990 		case MII_OUI_yyINTEL:
   9991 			switch (phy_model) {
   9992 			case MII_MODEL_yyINTEL_I82562G:
   9993 			case MII_MODEL_yyINTEL_I82562EM:
   9994 			case MII_MODEL_yyINTEL_I82562ET:
   9995 				new_phytype = WMPHY_IFE;
   9996 				break;
   9997 			case MII_MODEL_yyINTEL_IGP01E1000:
   9998 				new_phytype = WMPHY_IGP;
   9999 				break;
   10000 			case MII_MODEL_yyINTEL_I82566:
   10001 				new_phytype = WMPHY_IGP_3;
   10002 				break;
   10003 			default:
   10004 				break;
   10005 			}
   10006 			break;
   10007 		default:
   10008 			break;
   10009 		}
   10010 		if (new_phytype == WMPHY_UNKNOWN)
   10011 			aprint_verbose_dev(dev,
   10012 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10013 			    __func__, phy_oui, phy_model);
   10014 
		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. PHY type from PHY ID = %u\n",
			    sc->sc_phytype, new_phytype);
		}
   10021 	}
   10022 
   10023 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10024 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10025 		/* SGMII */
   10026 		new_readreg = wm_sgmii_readreg;
   10027 		new_writereg = wm_sgmii_writereg;
   10028 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10029 		/* BM2 (phyaddr == 1) */
   10030 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10031 		    && (new_phytype != WMPHY_BM)
   10032 		    && (new_phytype != WMPHY_UNKNOWN))
   10033 			doubt_phytype = new_phytype;
   10034 		new_phytype = WMPHY_BM;
   10035 		new_readreg = wm_gmii_bm_readreg;
   10036 		new_writereg = wm_gmii_bm_writereg;
   10037 	} else if (sc->sc_type >= WM_T_PCH) {
   10038 		/* All PCH* use _hv_ */
   10039 		new_readreg = wm_gmii_hv_readreg;
   10040 		new_writereg = wm_gmii_hv_writereg;
   10041 	} else if (sc->sc_type >= WM_T_ICH8) {
   10042 		/* non-82567 ICH8, 9 and 10 */
   10043 		new_readreg = wm_gmii_i82544_readreg;
   10044 		new_writereg = wm_gmii_i82544_writereg;
   10045 	} else if (sc->sc_type >= WM_T_80003) {
   10046 		/* 80003 */
   10047 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10048 		    && (new_phytype != WMPHY_GG82563)
   10049 		    && (new_phytype != WMPHY_UNKNOWN))
   10050 			doubt_phytype = new_phytype;
   10051 		new_phytype = WMPHY_GG82563;
   10052 		new_readreg = wm_gmii_i80003_readreg;
   10053 		new_writereg = wm_gmii_i80003_writereg;
   10054 	} else if (sc->sc_type >= WM_T_I210) {
   10055 		/* I210 and I211 */
   10056 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10057 		    && (new_phytype != WMPHY_I210)
   10058 		    && (new_phytype != WMPHY_UNKNOWN))
   10059 			doubt_phytype = new_phytype;
   10060 		new_phytype = WMPHY_I210;
   10061 		new_readreg = wm_gmii_gs40g_readreg;
   10062 		new_writereg = wm_gmii_gs40g_writereg;
   10063 	} else if (sc->sc_type >= WM_T_82580) {
   10064 		/* 82580, I350 and I354 */
   10065 		new_readreg = wm_gmii_82580_readreg;
   10066 		new_writereg = wm_gmii_82580_writereg;
   10067 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10069 		new_readreg = wm_gmii_i82544_readreg;
   10070 		new_writereg = wm_gmii_i82544_writereg;
   10071 	} else {
   10072 		new_readreg = wm_gmii_i82543_readreg;
   10073 		new_writereg = wm_gmii_i82543_writereg;
   10074 	}
   10075 
   10076 	if (new_phytype == WMPHY_BM) {
   10077 		/* All BM use _bm_ */
   10078 		new_readreg = wm_gmii_bm_readreg;
   10079 		new_writereg = wm_gmii_bm_writereg;
   10080 	}
   10081 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10082 		/* All PCH* use _hv_ */
   10083 		new_readreg = wm_gmii_hv_readreg;
   10084 		new_writereg = wm_gmii_hv_writereg;
   10085 	}
   10086 
   10087 	/* Diag output */
   10088 	if (doubt_phytype != WMPHY_UNKNOWN)
   10089 		aprint_error_dev(dev, "Assumed new PHY type was "
   10090 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10091 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
		    "was incorrect. New PHY type = %u\n",
		    sc->sc_phytype, new_phytype);
   10097 
   10098 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10099 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10100 
   10101 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10102 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10103 		    "function was incorrect.\n");
   10104 
   10105 	/* Update now */
   10106 	sc->sc_phytype = new_phytype;
   10107 	mii->mii_readreg = new_readreg;
   10108 	mii->mii_writereg = new_writereg;
   10109 	if (new_readreg == wm_gmii_hv_readreg) {
   10110 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10111 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10112 	} else if (new_readreg == wm_sgmii_readreg) {
   10113 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10114 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10115 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10116 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10117 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10118 	}
   10119 }
   10120 
   10121 /*
   10122  * wm_get_phy_id_82575:
   10123  *
 * Return the PHY ID, or -1 on failure.
   10125  */
   10126 static int
   10127 wm_get_phy_id_82575(struct wm_softc *sc)
   10128 {
   10129 	uint32_t reg;
   10130 	int phyid = -1;
   10131 
   10132 	/* XXX */
   10133 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10134 		return -1;
   10135 
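	/*
	 * When SGMII uses MDIO, the PHY address is taken from the MDIC
	 * register on the 82575/82576 and from MDICNFG on newer devices.
	 */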
   10136 	if (wm_sgmii_uses_mdio(sc)) {
   10137 		switch (sc->sc_type) {
   10138 		case WM_T_82575:
   10139 		case WM_T_82576:
   10140 			reg = CSR_READ(sc, WMREG_MDIC);
   10141 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10142 			break;
   10143 		case WM_T_82580:
   10144 		case WM_T_I350:
   10145 		case WM_T_I354:
   10146 		case WM_T_I210:
   10147 		case WM_T_I211:
   10148 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10149 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10150 			break;
   10151 		default:
   10152 			return -1;
   10153 		}
   10154 	}
   10155 
   10156 	return phyid;
   10157 }
   10158 
   10160 /*
   10161  * wm_gmii_mediainit:
   10162  *
   10163  *	Initialize media for use on 1000BASE-T devices.
   10164  */
   10165 static void
   10166 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10167 {
   10168 	device_t dev = sc->sc_dev;
   10169 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10170 	struct mii_data *mii = &sc->sc_mii;
   10171 	uint32_t reg;
   10172 
   10173 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10174 		device_xname(sc->sc_dev), __func__));
   10175 
   10176 	/* We have GMII. */
   10177 	sc->sc_flags |= WM_F_HAS_MII;
   10178 
   10179 	if (sc->sc_type == WM_T_80003)
   10180 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10181 	else
   10182 		sc->sc_tipg = TIPG_1000T_DFLT;
   10183 
   10184 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10185 	if ((sc->sc_type == WM_T_82580)
   10186 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10187 	    || (sc->sc_type == WM_T_I211)) {
   10188 		reg = CSR_READ(sc, WMREG_PHPM);
   10189 		reg &= ~PHPM_GO_LINK_D;
   10190 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10191 	}
   10192 
   10193 	/*
   10194 	 * Let the chip set speed/duplex on its own based on
   10195 	 * signals from the PHY.
   10196 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10197 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10198 	 */
   10199 	sc->sc_ctrl |= CTRL_SLU;
   10200 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10201 
   10202 	/* Initialize our media structures and probe the GMII. */
   10203 	mii->mii_ifp = ifp;
   10204 
   10205 	mii->mii_statchg = wm_gmii_statchg;
   10206 
   10207 	/* get PHY control from SMBus to PCIe */
   10208 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10209 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10210 	    || (sc->sc_type == WM_T_PCH_CNP))
   10211 		wm_init_phy_workarounds_pchlan(sc);
   10212 
   10213 	wm_gmii_reset(sc);
   10214 
   10215 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10216 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10217 	    wm_gmii_mediastatus);
   10218 
   10219 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10220 	    || (sc->sc_type == WM_T_82580)
   10221 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10222 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10223 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10224 			/* Attach only one port */
   10225 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10226 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10227 		} else {
   10228 			int i, id;
   10229 			uint32_t ctrl_ext;
   10230 
   10231 			id = wm_get_phy_id_82575(sc);
   10232 			if (id != -1) {
   10233 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10234 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10235 			}
   10236 			if ((id == -1)
   10237 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
				CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   10242 				CSR_WRITE_FLUSH(sc);
   10243 				delay(300*1000); /* XXX too long */
   10244 
				/* Try PHY addresses 1 through 7 */
   10246 				for (i = 1; i < 8; i++)
   10247 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10248 					    0xffffffff, i, MII_OFFSET_ANY,
   10249 					    MIIF_DOPAUSE);
   10250 
				/* Restore the previous SFP cage power state */
   10252 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10253 			}
   10254 		}
   10255 	} else
   10256 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10257 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10258 
	/*
	 * If the MAC is PCH2 or newer and it failed to detect an MII PHY,
	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
	 */
   10263 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10264 		|| (sc->sc_type == WM_T_PCH_SPT)
   10265 		|| (sc->sc_type == WM_T_PCH_CNP))
   10266 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10267 		wm_set_mdio_slow_mode_hv(sc);
   10268 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10269 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10270 	}
   10271 
   10272 	/*
   10273 	 * (For ICH8 variants)
   10274 	 * If PHY detection failed, use BM's r/w function and retry.
   10275 	 */
   10276 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10277 		/* if failed, retry with *_bm_* */
   10278 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10279 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10280 		    sc->sc_phytype);
   10281 		sc->sc_phytype = WMPHY_BM;
   10282 		mii->mii_readreg = wm_gmii_bm_readreg;
   10283 		mii->mii_writereg = wm_gmii_bm_writereg;
   10284 
   10285 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10286 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10287 	}
   10288 
   10289 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   10291 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10292 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10293 		sc->sc_phytype = WMPHY_NONE;
   10294 	} else {
   10295 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10296 
		/*
		 * A PHY was found. Check the PHY type again by the second
		 * call of wm_gmii_setup_phytype().
		 */
   10301 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10302 		    child->mii_mpd_model);
   10303 
   10304 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10305 	}
   10306 }
   10307 
   10308 /*
   10309  * wm_gmii_mediachange:	[ifmedia interface function]
   10310  *
   10311  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10312  */
   10313 static int
   10314 wm_gmii_mediachange(struct ifnet *ifp)
   10315 {
   10316 	struct wm_softc *sc = ifp->if_softc;
   10317 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10318 	int rc;
   10319 
   10320 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10321 		device_xname(sc->sc_dev), __func__));
   10322 	if ((ifp->if_flags & IFF_UP) == 0)
   10323 		return 0;
   10324 
   10325 	/* Disable D0 LPLU. */
   10326 	wm_lplu_d0_disable(sc);
   10327 
   10328 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10329 	sc->sc_ctrl |= CTRL_SLU;
   10330 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10331 	    || (sc->sc_type > WM_T_82543)) {
   10332 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10333 	} else {
   10334 		sc->sc_ctrl &= ~CTRL_ASDE;
   10335 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10336 		if (ife->ifm_media & IFM_FDX)
   10337 			sc->sc_ctrl |= CTRL_FD;
   10338 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10339 		case IFM_10_T:
   10340 			sc->sc_ctrl |= CTRL_SPEED_10;
   10341 			break;
   10342 		case IFM_100_TX:
   10343 			sc->sc_ctrl |= CTRL_SPEED_100;
   10344 			break;
   10345 		case IFM_1000_T:
   10346 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10347 			break;
   10348 		case IFM_NONE:
   10349 			/* There is no specific setting for IFM_NONE */
   10350 			break;
   10351 		default:
   10352 			panic("wm_gmii_mediachange: bad media 0x%x",
   10353 			    ife->ifm_media);
   10354 		}
   10355 	}
   10356 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10357 	CSR_WRITE_FLUSH(sc);
   10358 	if (sc->sc_type <= WM_T_82543)
   10359 		wm_gmii_reset(sc);
   10360 
   10361 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10362 		return 0;
   10363 	return rc;
   10364 }
   10365 
   10366 /*
   10367  * wm_gmii_mediastatus:	[ifmedia interface function]
   10368  *
   10369  *	Get the current interface media status on a 1000BASE-T device.
   10370  */
   10371 static void
   10372 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10373 {
   10374 	struct wm_softc *sc = ifp->if_softc;
   10375 
   10376 	ether_mediastatus(ifp, ifmr);
   10377 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10378 	    | sc->sc_flowflags;
   10379 }
   10380 
   10381 #define	MDI_IO		CTRL_SWDPIN(2)
   10382 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10383 #define	MDI_CLK		CTRL_SWDPIN(3)
   10384 
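/*
 * wm_i82543_mii_sendbits:
 *
 *	Clock out the 'nbits' most significant bits of 'data' on the
 *	bit-banged MDIO interface, toggling MDI_CLK once per bit with
 *	MDI_IO driven by the host.
 */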
   10385 static void
   10386 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10387 {
   10388 	uint32_t i, v;
   10389 
   10390 	v = CSR_READ(sc, WMREG_CTRL);
   10391 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10392 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10393 
   10394 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10395 		if (data & i)
   10396 			v |= MDI_IO;
   10397 		else
   10398 			v &= ~MDI_IO;
   10399 		CSR_WRITE(sc, WMREG_CTRL, v);
   10400 		CSR_WRITE_FLUSH(sc);
   10401 		delay(10);
   10402 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10403 		CSR_WRITE_FLUSH(sc);
   10404 		delay(10);
   10405 		CSR_WRITE(sc, WMREG_CTRL, v);
   10406 		CSR_WRITE_FLUSH(sc);
   10407 		delay(10);
   10408 	}
   10409 }
   10410 
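/*
 * wm_i82543_mii_recvbits:
 *
 *	Clock in a 16-bit value from the bit-banged MDIO interface,
 *	sampling MDI_IO on each MDI_CLK pulse, with turnaround clocks
 *	before and after the data bits.
 */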
   10411 static uint16_t
   10412 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10413 {
   10414 	uint32_t v, i;
   10415 	uint16_t data = 0;
   10416 
   10417 	v = CSR_READ(sc, WMREG_CTRL);
   10418 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10419 	v |= CTRL_SWDPIO(3);
   10420 
   10421 	CSR_WRITE(sc, WMREG_CTRL, v);
   10422 	CSR_WRITE_FLUSH(sc);
   10423 	delay(10);
   10424 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10425 	CSR_WRITE_FLUSH(sc);
   10426 	delay(10);
   10427 	CSR_WRITE(sc, WMREG_CTRL, v);
   10428 	CSR_WRITE_FLUSH(sc);
   10429 	delay(10);
   10430 
   10431 	for (i = 0; i < 16; i++) {
   10432 		data <<= 1;
   10433 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10434 		CSR_WRITE_FLUSH(sc);
   10435 		delay(10);
   10436 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10437 			data |= 1;
   10438 		CSR_WRITE(sc, WMREG_CTRL, v);
   10439 		CSR_WRITE_FLUSH(sc);
   10440 		delay(10);
   10441 	}
   10442 
   10443 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10444 	CSR_WRITE_FLUSH(sc);
   10445 	delay(10);
   10446 	CSR_WRITE(sc, WMREG_CTRL, v);
   10447 	CSR_WRITE_FLUSH(sc);
   10448 	delay(10);
   10449 
   10450 	return data;
   10451 }
   10452 
   10453 #undef MDI_IO
   10454 #undef MDI_DIR
   10455 #undef MDI_CLK
   10456 
   10457 /*
   10458  * wm_gmii_i82543_readreg:	[mii interface function]
   10459  *
   10460  *	Read a PHY register on the GMII (i82543 version).
   10461  */
   10462 static int
   10463 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10464 {
   10465 	struct wm_softc *sc = device_private(dev);
   10466 
   10467 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10468 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10469 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10470 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10471 
   10472 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10473 		device_xname(dev), phy, reg, *val));
   10474 
   10475 	return 0;
   10476 }
   10477 
   10478 /*
   10479  * wm_gmii_i82543_writereg:	[mii interface function]
   10480  *
   10481  *	Write a PHY register on the GMII (i82543 version).
   10482  */
   10483 static int
   10484 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10485 {
   10486 	struct wm_softc *sc = device_private(dev);
   10487 
   10488 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10489 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10490 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10491 	    (MII_COMMAND_START << 30), 32);
   10492 
   10493 	return 0;
   10494 }
   10495 
   10496 /*
   10497  * wm_gmii_mdic_readreg:	[mii interface function]
   10498  *
   10499  *	Read a PHY register on the GMII.
   10500  */
   10501 static int
   10502 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10503 {
   10504 	struct wm_softc *sc = device_private(dev);
   10505 	uint32_t mdic = 0;
   10506 	int i;
   10507 
   10508 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10509 	    && (reg > MII_ADDRMASK)) {
   10510 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10511 		    __func__, sc->sc_phytype, reg);
   10512 		reg &= MII_ADDRMASK;
   10513 	}
   10514 
   10515 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10516 	    MDIC_REGADD(reg));
   10517 
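	/* Poll for completion, waiting 50us per iteration */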
   10518 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10519 		delay(50);
   10520 		mdic = CSR_READ(sc, WMREG_MDIC);
   10521 		if (mdic & MDIC_READY)
   10522 			break;
   10523 	}
   10524 
   10525 	if ((mdic & MDIC_READY) == 0) {
   10526 		DPRINTF(WM_DEBUG_GMII,
   10527 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10528 			device_xname(dev), phy, reg));
   10529 		return ETIMEDOUT;
   10530 	} else if (mdic & MDIC_E) {
   10531 		/* This is normal if no PHY is present. */
   10532 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10533 			device_xname(sc->sc_dev), phy, reg));
   10534 		return -1;
   10535 	} else
   10536 		*val = MDIC_DATA(mdic);
   10537 
   10538 	/*
   10539 	 * Allow some time after each MDIC transaction to avoid
   10540 	 * reading duplicate data in the next MDIC transaction.
   10541 	 */
   10542 	if (sc->sc_type == WM_T_PCH2)
   10543 		delay(100);
   10544 
   10545 	return 0;
   10546 }
   10547 
   10548 /*
   10549  * wm_gmii_mdic_writereg:	[mii interface function]
   10550  *
   10551  *	Write a PHY register on the GMII.
   10552  */
   10553 static int
   10554 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10555 {
   10556 	struct wm_softc *sc = device_private(dev);
   10557 	uint32_t mdic = 0;
   10558 	int i;
   10559 
   10560 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10561 	    && (reg > MII_ADDRMASK)) {
   10562 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10563 		    __func__, sc->sc_phytype, reg);
   10564 		reg &= MII_ADDRMASK;
   10565 	}
   10566 
   10567 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10568 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10569 
   10570 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10571 		delay(50);
   10572 		mdic = CSR_READ(sc, WMREG_MDIC);
   10573 		if (mdic & MDIC_READY)
   10574 			break;
   10575 	}
   10576 
   10577 	if ((mdic & MDIC_READY) == 0) {
   10578 		DPRINTF(WM_DEBUG_GMII,
   10579 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10580 			device_xname(dev), phy, reg));
   10581 		return ETIMEDOUT;
   10582 	} else if (mdic & MDIC_E) {
   10583 		DPRINTF(WM_DEBUG_GMII,
   10584 		    ("%s: MDIC write error: phy %d reg %d\n",
   10585 			device_xname(dev), phy, reg));
   10586 		return -1;
   10587 	}
   10588 
   10589 	/*
   10590 	 * Allow some time after each MDIC transaction to avoid
   10591 	 * reading duplicate data in the next MDIC transaction.
   10592 	 */
   10593 	if (sc->sc_type == WM_T_PCH2)
   10594 		delay(100);
   10595 
   10596 	return 0;
   10597 }
   10598 
   10599 /*
   10600  * wm_gmii_i82544_readreg:	[mii interface function]
   10601  *
   10602  *	Read a PHY register on the GMII.
   10603  */
   10604 static int
   10605 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10606 {
   10607 	struct wm_softc *sc = device_private(dev);
   10608 	int rv;
   10609 
   10610 	if (sc->phy.acquire(sc)) {
   10611 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10612 		return -1;
   10613 	}
   10614 
   10615 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10616 
   10617 	sc->phy.release(sc);
   10618 
   10619 	return rv;
   10620 }
   10621 
   10622 static int
   10623 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10624 {
   10625 	struct wm_softc *sc = device_private(dev);
   10626 
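	/*
	 * For registers above the multi-page boundary, IGP PHYs need the
	 * page selected first; the MDIC access itself only carries the
	 * low 5 bits (MII_ADDRMASK) of the register number.
	 */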
   10627 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10628 		switch (sc->sc_phytype) {
   10629 		case WMPHY_IGP:
   10630 		case WMPHY_IGP_2:
   10631 		case WMPHY_IGP_3:
   10632 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10633 			    reg);
   10634 			break;
   10635 		default:
   10636 #ifdef WM_DEBUG
   10637 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10638 			    __func__, sc->sc_phytype, reg);
   10639 #endif
   10640 			break;
   10641 		}
   10642 	}
   10643 
   10644 	wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10645 
   10646 	return 0;
   10647 }
   10648 
   10649 /*
   10650  * wm_gmii_i82544_writereg:	[mii interface function]
   10651  *
   10652  *	Write a PHY register on the GMII.
   10653  */
   10654 static int
   10655 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10656 {
   10657 	struct wm_softc *sc = device_private(dev);
   10658 	int rv;
   10659 
   10660 	if (sc->phy.acquire(sc)) {
   10661 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10662 		return -1;
   10663 	}
   10664 
   10665 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10666 	sc->phy.release(sc);
   10667 
   10668 	return rv;
   10669 }
   10670 
   10671 static int
   10672 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10673 {
   10674 	struct wm_softc *sc = device_private(dev);
   10675 
   10676 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10677 		switch (sc->sc_phytype) {
   10678 		case WMPHY_IGP:
   10679 		case WMPHY_IGP_2:
   10680 		case WMPHY_IGP_3:
   10681 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10682 			    reg);
   10683 			break;
   10684 		default:
   10685 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10687 			    __func__, sc->sc_phytype, reg);
   10688 #endif
   10689 			break;
   10690 		}
   10691 	}
   10692 
   10693 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10694 
   10695 	return 0;
   10696 }
   10697 
   10698 /*
   10699  * wm_gmii_i80003_readreg:	[mii interface function]
   10700  *
 *	Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10704  */
   10705 static int
   10706 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10707 {
   10708 	struct wm_softc *sc = device_private(dev);
   10709 	int page_select;
   10710 	uint16_t temp, temp2;
   10711 	int rv = 0;
   10712 
   10713 	if (phy != 1) /* only one PHY on kumeran bus */
   10714 		return -1;
   10715 
   10716 	if (sc->phy.acquire(sc)) {
   10717 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10718 		return -1;
   10719 	}
   10720 
   10721 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10722 		page_select = GG82563_PHY_PAGE_SELECT;
   10723 	else {
   10724 		/*
   10725 		 * Use Alternative Page Select register to access registers
   10726 		 * 30 and 31.
   10727 		 */
   10728 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10729 	}
   10730 	temp = reg >> GG82563_PAGE_SHIFT;
   10731 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10732 		goto out;
   10733 
   10734 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10735 		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
   10738 		 */
   10739 		delay(200);
   10740 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10741 		if (temp2 != temp) {
   10742 			device_printf(dev, "%s failed\n", __func__);
   10743 			rv = -1;
   10744 			goto out;
   10745 		}
   10746 		delay(200);
   10747 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10748 		delay(200);
   10749 	} else
   10750 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10751 
   10752 out:
   10753 	sc->phy.release(sc);
   10754 	return rv;
   10755 }
   10756 
   10757 /*
   10758  * wm_gmii_i80003_writereg:	[mii interface function]
   10759  *
 *	Write a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10763  */
   10764 static int
   10765 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10766 {
   10767 	struct wm_softc *sc = device_private(dev);
   10768 	int page_select, rv;
   10769 	uint16_t temp, temp2;
   10770 
   10771 	if (phy != 1) /* only one PHY on kumeran bus */
   10772 		return -1;
   10773 
   10774 	if (sc->phy.acquire(sc)) {
   10775 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10776 		return -1;
   10777 	}
   10778 
   10779 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10780 		page_select = GG82563_PHY_PAGE_SELECT;
   10781 	else {
   10782 		/*
   10783 		 * Use Alternative Page Select register to access registers
   10784 		 * 30 and 31.
   10785 		 */
   10786 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10787 	}
   10788 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10789 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10790 		goto out;
   10791 
   10792 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10793 		/*
		 * Wait an extra 200us to work around a bug in the ready
		 * bit of the MDIC register.
   10796 		 */
   10797 		delay(200);
   10798 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10799 		if (temp2 != temp) {
   10800 			device_printf(dev, "%s failed\n", __func__);
   10801 			rv = -1;
   10802 			goto out;
   10803 		}
   10804 		delay(200);
   10805 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10806 		delay(200);
   10807 	} else
   10808 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10809 
   10810 out:
   10811 	sc->phy.release(sc);
   10812 	return rv;
   10813 }
   10814 
   10815 /*
   10816  * wm_gmii_bm_readreg:	[mii interface function]
   10817  *
 *	Read a PHY register on a BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10821  */
   10822 static int
   10823 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10824 {
   10825 	struct wm_softc *sc = device_private(dev);
   10826 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10827 	int rv;
   10828 
   10829 	if (sc->phy.acquire(sc)) {
   10830 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10831 		return -1;
   10832 	}
   10833 
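	/*
	 * Except on the 82574/82583, registers on pages >= 768, page 0
	 * register 25 and register 31 are only reachable at PHY address 1.
	 */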
   10834 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10835 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10836 		    || (reg == 31)) ? 1 : phy;
   10837 	/* Page 800 works differently than the rest so it has its own func */
   10838 	if (page == BM_WUC_PAGE) {
   10839 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10840 		goto release;
   10841 	}
   10842 
   10843 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10844 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10845 		    && (sc->sc_type != WM_T_82583))
   10846 			rv = wm_gmii_mdic_writereg(dev, phy,
   10847 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10848 		else
   10849 			rv = wm_gmii_mdic_writereg(dev, phy,
   10850 			    BME1000_PHY_PAGE_SELECT, page);
   10851 		if (rv != 0)
   10852 			goto release;
   10853 	}
   10854 
   10855 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10856 
   10857 release:
   10858 	sc->phy.release(sc);
   10859 	return rv;
   10860 }
   10861 
   10862 /*
   10863  * wm_gmii_bm_writereg:	[mii interface function]
   10864  *
 *	Write a PHY register on a BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10868  */
   10869 static int
   10870 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10871 {
   10872 	struct wm_softc *sc = device_private(dev);
   10873 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10874 	int rv;
   10875 
   10876 	if (sc->phy.acquire(sc)) {
   10877 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10878 		return -1;
   10879 	}
   10880 
   10881 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10882 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10883 		    || (reg == 31)) ? 1 : phy;
   10884 	/* Page 800 works differently than the rest so it has its own func */
   10885 	if (page == BM_WUC_PAGE) {
   10886 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10887 		goto release;
   10888 	}
   10889 
   10890 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10891 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10892 		    && (sc->sc_type != WM_T_82583))
   10893 			rv = wm_gmii_mdic_writereg(dev, phy,
   10894 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10895 		else
   10896 			rv = wm_gmii_mdic_writereg(dev, phy,
   10897 			    BME1000_PHY_PAGE_SELECT, page);
   10898 		if (rv != 0)
   10899 			goto release;
   10900 	}
   10901 
   10902 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10903 
   10904 release:
   10905 	sc->phy.release(sc);
   10906 	return rv;
   10907 }
   10908 
   10909 /*
   10910  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10911  *  @dev: pointer to the HW structure
   10912  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10913  *
   10914  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10915  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10916  */
   10917 static int
   10918 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10919 {
   10920 	uint16_t temp;
   10921 	int rv;
   10922 
   10923 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10924 		device_xname(dev), __func__));
   10925 
   10926 	if (!phy_regp)
   10927 		return -1;
   10928 
   10929 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10930 
   10931 	/* Select Port Control Registers page */
   10932 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10933 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10934 	if (rv != 0)
   10935 		return rv;
   10936 
   10937 	/* Read WUCE and save it */
   10938 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10939 	if (rv != 0)
   10940 		return rv;
   10941 
	/*
	 * Enable both PHY wakeup mode and Wakeup register page writes.
	 * Prevent a power state change by disabling ME and Host PHY wakeup.
	 */
   10945 	temp = *phy_regp;
   10946 	temp |= BM_WUC_ENABLE_BIT;
   10947 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10948 
   10949 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10950 		return rv;
   10951 
	/*
	 * Select the Host Wakeup Registers page; the caller is now able
	 * to write registers on the Wakeup registers page.
	 */
   10955 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10956 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10957 }
   10958 
   10959 /*
   10960  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10961  *  @dev: pointer to the HW structure
   10962  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10963  *
   10964  *  Restore BM_WUC_ENABLE_REG to its original value.
   10965  *
   10966  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10967  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10968  *  caller.
   10969  */
   10970 static int
   10971 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10972 {
   10973 
   10974 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10975 		device_xname(dev), __func__));
   10976 
   10977 	if (!phy_regp)
   10978 		return -1;
   10979 
   10980 	/* Select Port Control Registers page */
   10981 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10982 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10983 
   10984 	/* Restore 769.17 to its original value */
   10985 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10986 
   10987 	return 0;
   10988 }
   10989 
   10990 /*
   10991  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 *  @dev: pointer to the HW structure
   10993  *  @offset: register offset to be read or written
   10994  *  @val: pointer to the data to read or write
   10995  *  @rd: determines if operation is read or write
   10996  *  @page_set: BM_WUC_PAGE already set and access enabled
   10997  *
   10998  *  Read the PHY register at offset and store the retrieved information in
   10999  *  data, or write data to PHY register at offset.  Note the procedure to
   11000  *  access the PHY wakeup registers is different than reading the other PHY
   11001  *  registers. It works as such:
   11002  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11003  *  2) Set page to 800 for host (801 if we were manageability)
   11004  *  3) Write the address using the address opcode (0x11)
   11005  *  4) Read or write the data using the data opcode (0x12)
   11006  *  5) Restore 769.17.2 to its original value
   11007  *
   11008  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11009  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11010  *
   11011  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11012  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11013  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11014  */
   11015 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11017 	bool page_set)
   11018 {
   11019 	struct wm_softc *sc = device_private(dev);
   11020 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11021 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11022 	uint16_t wuce;
   11023 	int rv = 0;
   11024 
   11025 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11026 		device_xname(dev), __func__));
   11027 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11028 	if ((sc->sc_type == WM_T_PCH)
   11029 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11030 		device_printf(dev,
   11031 		    "Attempting to access page %d while gig enabled.\n", page);
   11032 	}
   11033 
   11034 	if (!page_set) {
   11035 		/* Enable access to PHY wakeup registers */
   11036 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11037 		if (rv != 0) {
   11038 			device_printf(dev,
   11039 			    "%s: Could not enable PHY wakeup reg access\n",
   11040 			    __func__);
   11041 			return rv;
   11042 		}
   11043 	}
   11044 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11045 		device_xname(sc->sc_dev), __func__, page, regnum));
   11046 
   11047 	/*
   11048 	 * 2) Access PHY wakeup register.
   11049 	 * See wm_access_phy_wakeup_reg_bm.
   11050 	 */
   11051 
   11052 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11053 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11054 	if (rv != 0)
   11055 		return rv;
   11056 
   11057 	if (rd) {
   11058 		/* Read the Wakeup register page value using opcode 0x12 */
   11059 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11060 	} else {
   11061 		/* Write the Wakeup register page value using opcode 0x12 */
   11062 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11063 	}
   11064 	if (rv != 0)
   11065 		return rv;
   11066 
   11067 	if (!page_set)
   11068 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11069 
   11070 	return rv;
   11071 }
   11072 
   11073 /*
   11074  * wm_gmii_hv_readreg:	[mii interface function]
   11075  *
 *	Read a PHY register on an HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11079  */
   11080 static int
   11081 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11082 {
   11083 	struct wm_softc *sc = device_private(dev);
   11084 	int rv;
   11085 
   11086 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11087 		device_xname(dev), __func__));
   11088 	if (sc->phy.acquire(sc)) {
   11089 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11090 		return -1;
   11091 	}
   11092 
   11093 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11094 	sc->phy.release(sc);
   11095 	return rv;
   11096 }
   11097 
   11098 static int
   11099 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11100 {
   11101 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11102 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11103 	int rv;
   11104 
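	/* Registers on pages >= HV_INTC_FC_PAGE_START live at PHY address 1 */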
   11105 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11106 
   11107 	/* Page 800 works differently than the rest so it has its own func */
   11108 	if (page == BM_WUC_PAGE)
   11109 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11110 
	/*
	 * Pages lower than 768 work differently than the rest and are
	 * not handled here.
	 */
   11115 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11116 		printf("gmii_hv_readreg!!!\n");
   11117 		return -1;
   11118 	}
   11119 
   11120 	/*
   11121 	 * XXX I21[789] documents say that the SMBus Address register is at
   11122 	 * PHY address 01, Page 0 (not 768), Register 26.
   11123 	 */
   11124 	if (page == HV_INTC_FC_PAGE_START)
   11125 		page = 0;
   11126 
   11127 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11128 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11129 		    page << BME1000_PAGE_SHIFT);
   11130 		if (rv != 0)
   11131 			return rv;
   11132 	}
   11133 
   11134 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11135 }
   11136 
   11137 /*
   11138  * wm_gmii_hv_writereg:	[mii interface function]
   11139  *
 *	Write a PHY register on an HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11143  */
   11144 static int
   11145 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11146 {
   11147 	struct wm_softc *sc = device_private(dev);
   11148 	int rv;
   11149 
   11150 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11151 		device_xname(dev), __func__));
   11152 
   11153 	if (sc->phy.acquire(sc)) {
   11154 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11155 		return -1;
   11156 	}
   11157 
   11158 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11159 	sc->phy.release(sc);
   11160 
   11161 	return rv;
   11162 }
   11163 
   11164 static int
   11165 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11166 {
   11167 	struct wm_softc *sc = device_private(dev);
   11168 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11169 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11170 	int rv;
   11171 
   11172 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11173 
   11174 	/* Page 800 works differently than the rest so it has its own func */
   11175 	if (page == BM_WUC_PAGE)
   11176 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11177 		    false);
   11178 
	/*
	 * Pages 1 through 767 work differently from the rest, so they
	 * would need their own function; they are not supported here.
	 */
   11183 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11184 		printf("gmii_hv_writereg!!!\n");
   11185 		return -1;
   11186 	}
   11187 
   11188 	{
   11189 		/*
   11190 		 * XXX I21[789] documents say that the SMBus Address register
   11191 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11192 		 */
   11193 		if (page == HV_INTC_FC_PAGE_START)
   11194 			page = 0;
   11195 
   11196 		/*
   11197 		 * XXX Workaround MDIO accesses being disabled after entering
   11198 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11199 		 * register is set)
   11200 		 */
   11201 		if (sc->sc_phytype == WMPHY_82578) {
   11202 			struct mii_softc *child;
   11203 
   11204 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11205 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11206 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11207 			    && ((val & (1 << 11)) != 0)) {
   11208 				printf("XXX need workaround\n");
   11209 			}
   11210 		}
   11211 
   11212 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11213 			rv = wm_gmii_mdic_writereg(dev, 1,
   11214 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11215 			if (rv != 0)
   11216 				return rv;
   11217 		}
   11218 	}
   11219 
   11220 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11221 }
   11222 
   11223 /*
   11224  * wm_gmii_82580_readreg:	[mii interface function]
   11225  *
   11226  *	Read a PHY register on the 82580 and I350.
   11227  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11229  */
   11230 static int
   11231 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11232 {
   11233 	struct wm_softc *sc = device_private(dev);
   11234 	int rv;
   11235 
   11236 	if (sc->phy.acquire(sc) != 0) {
   11237 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11238 		return -1;
   11239 	}
   11240 
   11241 #ifdef DIAGNOSTIC
   11242 	if (reg > MII_ADDRMASK) {
   11243 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11244 		    __func__, sc->sc_phytype, reg);
   11245 		reg &= MII_ADDRMASK;
   11246 	}
   11247 #endif
   11248 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11249 
   11250 	sc->phy.release(sc);
   11251 	return rv;
   11252 }
   11253 
   11254 /*
   11255  * wm_gmii_82580_writereg:	[mii interface function]
   11256  *
   11257  *	Write a PHY register on the 82580 and I350.
   11258  * This could be handled by the PHY layer if we didn't have to lock the
   11259  * ressource ...
   11260  */
   11261 static int
   11262 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11263 {
   11264 	struct wm_softc *sc = device_private(dev);
   11265 	int rv;
   11266 
   11267 	if (sc->phy.acquire(sc) != 0) {
   11268 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11269 		return -1;
   11270 	}
   11271 
   11272 #ifdef DIAGNOSTIC
   11273 	if (reg > MII_ADDRMASK) {
   11274 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11275 		    __func__, sc->sc_phytype, reg);
   11276 		reg &= MII_ADDRMASK;
   11277 	}
   11278 #endif
   11279 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11280 
   11281 	sc->phy.release(sc);
   11282 	return rv;
   11283 }
   11284 
   11285 /*
   11286  * wm_gmii_gs40g_readreg:	[mii interface function]
   11287  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11291  */
   11292 static int
   11293 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11294 {
   11295 	struct wm_softc *sc = device_private(dev);
   11296 	int page, offset;
   11297 	int rv;
   11298 
   11299 	/* Acquire semaphore */
   11300 	if (sc->phy.acquire(sc)) {
   11301 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11302 		return -1;
   11303 	}
   11304 
   11305 	/* Page select */
   11306 	page = reg >> GS40G_PAGE_SHIFT;
   11307 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11308 	if (rv != 0)
   11309 		goto release;
   11310 
   11311 	/* Read reg */
   11312 	offset = reg & GS40G_OFFSET_MASK;
   11313 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11314 
   11315 release:
   11316 	sc->phy.release(sc);
   11317 	return rv;
   11318 }
   11319 
   11320 /*
   11321  * wm_gmii_gs40g_writereg:	[mii interface function]
   11322  *
   11323  *	Write a PHY register on the I210 and I211.
   11324  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11326  */
   11327 static int
   11328 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11329 {
   11330 	struct wm_softc *sc = device_private(dev);
   11331 	uint16_t page;
   11332 	int offset, rv;
   11333 
   11334 	/* Acquire semaphore */
   11335 	if (sc->phy.acquire(sc)) {
   11336 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11337 		return -1;
   11338 	}
   11339 
   11340 	/* Page select */
   11341 	page = reg >> GS40G_PAGE_SHIFT;
   11342 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11343 	if (rv != 0)
   11344 		goto release;
   11345 
   11346 	/* Write reg */
   11347 	offset = reg & GS40G_OFFSET_MASK;
   11348 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11349 
   11350 release:
   11351 	/* Release semaphore */
   11352 	sc->phy.release(sc);
   11353 	return rv;
   11354 }
   11355 
   11356 /*
   11357  * wm_gmii_statchg:	[mii interface function]
   11358  *
   11359  *	Callback from MII layer when media changes.
   11360  */
   11361 static void
   11362 wm_gmii_statchg(struct ifnet *ifp)
   11363 {
   11364 	struct wm_softc *sc = ifp->if_softc;
   11365 	struct mii_data *mii = &sc->sc_mii;
   11366 
   11367 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11368 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11369 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11370 
   11371 	/*
   11372 	 * Get flow control negotiation result.
   11373 	 */
   11374 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11375 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11376 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11377 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11378 	}
   11379 
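	/* Map the negotiated pause flags onto the MAC's flow control bits. */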
   11380 	if (sc->sc_flowflags & IFM_FLOW) {
   11381 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11382 			sc->sc_ctrl |= CTRL_TFCE;
   11383 			sc->sc_fcrtl |= FCRTL_XONE;
   11384 		}
   11385 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11386 			sc->sc_ctrl |= CTRL_RFCE;
   11387 	}
   11388 
   11389 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11390 		DPRINTF(WM_DEBUG_LINK,
   11391 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11392 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11393 	} else {
   11394 		DPRINTF(WM_DEBUG_LINK,
   11395 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11396 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11397 	}
   11398 
   11399 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11400 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11401 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11402 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11403 	if (sc->sc_type == WM_T_80003) {
   11404 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11405 		case IFM_1000_T:
   11406 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11407 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11409 			break;
   11410 		default:
   11411 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11412 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11414 			break;
   11415 		}
   11416 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11417 	}
   11418 }
   11419 
   11420 /* kumeran related (80003, ICH* and PCH*) */
   11421 
   11422 /*
   11423  * wm_kmrn_readreg:
   11424  *
   11425  *	Read a kumeran register
   11426  */
   11427 static int
   11428 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11429 {
   11430 	int rv;
   11431 
   11432 	if (sc->sc_type == WM_T_80003)
   11433 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11434 	else
   11435 		rv = sc->phy.acquire(sc);
   11436 	if (rv != 0) {
   11437 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11438 		    __func__);
   11439 		return rv;
   11440 	}
   11441 
   11442 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11443 
   11444 	if (sc->sc_type == WM_T_80003)
   11445 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11446 	else
   11447 		sc->phy.release(sc);
   11448 
   11449 	return rv;
   11450 }
   11451 
   11452 static int
   11453 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11454 {
   11455 
   11456 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11457 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11458 	    KUMCTRLSTA_REN);
   11459 	CSR_WRITE_FLUSH(sc);
   11460 	delay(2);
   11461 
   11462 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11463 
   11464 	return 0;
   11465 }
   11466 
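/*
 * Usage sketch (illustrative): all kumeran registers are multiplexed
 * through the single KUMCTRLSTA register; the offset goes into the
 * upper bits, KUMCTRLSTA_REN requests a read and the data comes back
 * in the low 16 bits, as wm_kmrn_readreg_locked() above implements.
 *
 *	uint16_t hd;
 *	(void)wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &hd);
 */
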
   11467 /*
   11468  * wm_kmrn_writereg:
   11469  *
   11470  *	Write a kumeran register
   11471  */
   11472 static int
   11473 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11474 {
   11475 	int rv;
   11476 
   11477 	if (sc->sc_type == WM_T_80003)
   11478 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11479 	else
   11480 		rv = sc->phy.acquire(sc);
   11481 	if (rv != 0) {
   11482 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11483 		    __func__);
   11484 		return rv;
   11485 	}
   11486 
   11487 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11488 
   11489 	if (sc->sc_type == WM_T_80003)
   11490 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11491 	else
   11492 		sc->phy.release(sc);
   11493 
   11494 	return rv;
   11495 }
   11496 
   11497 static int
   11498 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11499 {
   11500 
   11501 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11502 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11503 
   11504 	return 0;
   11505 }
   11506 
   11507 /*
   11508  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11509  * This access method is different from IEEE MMD.
   11510  */
   11511 static int
   11512 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11513 {
   11514 	struct wm_softc *sc = device_private(dev);
   11515 	int rv;
   11516 
   11517 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11518 	if (rv != 0)
   11519 		return rv;
   11520 
   11521 	if (rd)
   11522 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11523 	else
   11524 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11525 	return rv;
   11526 }
   11527 
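/*
 * EMI access is a two-step indirection: the EMI register address is
 * first written through PHY register I82579_EMI_ADDR, then the data is
 * read or written through I82579_EMI_DATA.  A read looks like this
 * (sketch; SOME_EMI_REG is a placeholder name, for illustration only):
 *
 *	uint16_t data;
 *	rv = wm_read_emi_reg_locked(dev, SOME_EMI_REG, &data);
 */
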
   11528 static int
   11529 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11530 {
   11531 
   11532 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11533 }
   11534 
   11535 static int
   11536 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11537 {
   11538 
   11539 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11540 }
   11541 
   11542 /* SGMII related */
   11543 
   11544 /*
   11545  * wm_sgmii_uses_mdio
   11546  *
   11547  * Check whether the transaction is to the internal PHY or the external
   11548  * MDIO interface. Return true if it's MDIO.
   11549  */
   11550 static bool
   11551 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11552 {
   11553 	uint32_t reg;
   11554 	bool ismdio = false;
   11555 
   11556 	switch (sc->sc_type) {
   11557 	case WM_T_82575:
   11558 	case WM_T_82576:
   11559 		reg = CSR_READ(sc, WMREG_MDIC);
   11560 		ismdio = ((reg & MDIC_DEST) != 0);
   11561 		break;
   11562 	case WM_T_82580:
   11563 	case WM_T_I350:
   11564 	case WM_T_I354:
   11565 	case WM_T_I210:
   11566 	case WM_T_I211:
   11567 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11568 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11569 		break;
   11570 	default:
   11571 		break;
   11572 	}
   11573 
   11574 	return ismdio;
   11575 }
   11576 
   11577 /*
   11578  * wm_sgmii_readreg:	[mii interface function]
   11579  *
 *	Read a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11583  */
   11584 static int
   11585 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11586 {
   11587 	struct wm_softc *sc = device_private(dev);
   11588 	int rv;
   11589 
   11590 	if (sc->phy.acquire(sc)) {
   11591 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11592 		return -1;
   11593 	}
   11594 
   11595 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11596 
   11597 	sc->phy.release(sc);
   11598 	return rv;
   11599 }
   11600 
   11601 static int
   11602 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11603 {
   11604 	struct wm_softc *sc = device_private(dev);
   11605 	uint32_t i2ccmd;
	int i, rv = 0;
   11607 
   11608 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11609 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11610 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11611 
   11612 	/* Poll the ready bit */
   11613 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11614 		delay(50);
   11615 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11616 		if (i2ccmd & I2CCMD_READY)
   11617 			break;
   11618 	}
   11619 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11620 		device_printf(dev, "I2CCMD Read did not complete\n");
   11621 		rv = ETIMEDOUT;
   11622 	}
   11623 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11624 		device_printf(dev, "I2CCMD Error bit set\n");
   11625 		rv = EIO;
   11626 	}
   11627 
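	/* The I2C interface returns the bytes swapped; swap them back. */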
   11628 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11629 
   11630 	return rv;
   11631 }
   11632 
   11633 /*
   11634  * wm_sgmii_writereg:	[mii interface function]
   11635  *
   11636  *	Write a PHY register on the SGMII.
   11637  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11639  */
   11640 static int
   11641 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11642 {
   11643 	struct wm_softc *sc = device_private(dev);
   11644 	int rv;
   11645 
   11646 	if (sc->phy.acquire(sc) != 0) {
   11647 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11648 		return -1;
   11649 	}
   11650 
   11651 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11652 
   11653 	sc->phy.release(sc);
   11654 
   11655 	return rv;
   11656 }
   11657 
   11658 static int
   11659 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11660 {
   11661 	struct wm_softc *sc = device_private(dev);
   11662 	uint32_t i2ccmd;
   11663 	uint16_t swapdata;
   11664 	int rv = 0;
   11665 	int i;
   11666 
   11667 	/* Swap the data bytes for the I2C interface */
   11668 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11669 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11670 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11671 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11672 
   11673 	/* Poll the ready bit */
   11674 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11675 		delay(50);
   11676 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11677 		if (i2ccmd & I2CCMD_READY)
   11678 			break;
   11679 	}
   11680 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11681 		device_printf(dev, "I2CCMD Write did not complete\n");
   11682 		rv = ETIMEDOUT;
   11683 	}
   11684 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11685 		device_printf(dev, "I2CCMD Error bit set\n");
   11686 		rv = EIO;
   11687 	}
   11688 
   11689 	return rv;
   11690 }
   11691 
   11692 /* TBI related */
   11693 
   11694 static bool
   11695 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11696 {
   11697 	bool sig;
   11698 
   11699 	sig = ctrl & CTRL_SWDPIN(1);
   11700 
   11701 	/*
   11702 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11703 	 * detect a signal, 1 if they don't.
   11704 	 */
   11705 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11706 		sig = !sig;
   11707 
   11708 	return sig;
   11709 }
   11710 
   11711 /*
   11712  * wm_tbi_mediainit:
   11713  *
   11714  *	Initialize media for use on 1000BASE-X devices.
   11715  */
   11716 static void
   11717 wm_tbi_mediainit(struct wm_softc *sc)
   11718 {
   11719 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11720 	const char *sep = "";
   11721 
   11722 	if (sc->sc_type < WM_T_82543)
   11723 		sc->sc_tipg = TIPG_WM_DFLT;
   11724 	else
   11725 		sc->sc_tipg = TIPG_LG_DFLT;
   11726 
   11727 	sc->sc_tbi_serdes_anegticks = 5;
   11728 
   11729 	/* Initialize our media structures */
   11730 	sc->sc_mii.mii_ifp = ifp;
   11731 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11732 
   11733 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11734 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11735 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11736 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11737 	else
   11738 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11739 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11740 
   11741 	/*
   11742 	 * SWD Pins:
   11743 	 *
   11744 	 *	0 = Link LED (output)
   11745 	 *	1 = Loss Of Signal (input)
   11746 	 */
   11747 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11748 
   11749 	/* XXX Perhaps this is only for TBI */
   11750 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11751 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11752 
   11753 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11754 		sc->sc_ctrl &= ~CTRL_LRST;
   11755 
   11756 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11757 
   11758 #define	ADD(ss, mm, dd)							\
   11759 do {									\
   11760 	aprint_normal("%s%s", sep, ss);					\
   11761 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11762 	sep = ", ";							\
   11763 } while (/*CONSTCOND*/0)
   11764 
   11765 	aprint_normal_dev(sc->sc_dev, "");
   11766 
   11767 	if (sc->sc_type == WM_T_I354) {
   11768 		uint32_t status;
   11769 
   11770 		status = CSR_READ(sc, WMREG_STATUS);
   11771 		if (((status & STATUS_2P5_SKU) != 0)
   11772 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11773 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11774 		} else
   11775 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11776 	} else if (sc->sc_type == WM_T_82545) {
   11777 		/* Only 82545 is LX (XXX except SFP) */
   11778 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11779 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11780 	} else {
   11781 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11782 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11783 	}
   11784 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11785 	aprint_normal("\n");
   11786 
   11787 #undef ADD
   11788 
   11789 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11790 }
   11791 
   11792 /*
   11793  * wm_tbi_mediachange:	[ifmedia interface function]
   11794  *
   11795  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11796  */
   11797 static int
   11798 wm_tbi_mediachange(struct ifnet *ifp)
   11799 {
   11800 	struct wm_softc *sc = ifp->if_softc;
   11801 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11802 	uint32_t status, ctrl;
   11803 	bool signal;
   11804 	int i;
   11805 
   11806 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11807 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11808 		/* XXX need some work for >= 82571 and < 82575 */
   11809 		if (sc->sc_type < WM_T_82575)
   11810 			return 0;
   11811 	}
   11812 
   11813 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11814 	    || (sc->sc_type >= WM_T_82575))
   11815 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11816 
   11817 	sc->sc_ctrl &= ~CTRL_LRST;
   11818 	sc->sc_txcw = TXCW_ANE;
   11819 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11820 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11821 	else if (ife->ifm_media & IFM_FDX)
   11822 		sc->sc_txcw |= TXCW_FD;
   11823 	else
   11824 		sc->sc_txcw |= TXCW_HD;
   11825 
   11826 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11827 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11828 
	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11830 		device_xname(sc->sc_dev), sc->sc_txcw));
   11831 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11832 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11833 	CSR_WRITE_FLUSH(sc);
   11834 	delay(1000);
   11835 
	ctrl = CSR_READ(sc, WMREG_CTRL);
   11837 	signal = wm_tbi_havesignal(sc, ctrl);
   11838 
   11839 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11840 		signal));
   11841 
   11842 	if (signal) {
   11843 		/* Have signal; wait for the link to come up. */
   11844 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11845 			delay(10000);
   11846 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11847 				break;
   11848 		}
   11849 
		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
			device_xname(sc->sc_dev), i));
   11852 
   11853 		status = CSR_READ(sc, WMREG_STATUS);
   11854 		DPRINTF(WM_DEBUG_LINK,
   11855 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev), status, STATUS_LU));
   11857 		if (status & STATUS_LU) {
   11858 			/* Link is up. */
   11859 			DPRINTF(WM_DEBUG_LINK,
   11860 			    ("%s: LINK: set media -> link up %s\n",
   11861 				device_xname(sc->sc_dev),
   11862 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11863 
			/*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so re-read CTRL into sc->sc_ctrl.
			 */
   11868 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11869 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11870 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11871 			if (status & STATUS_FD)
   11872 				sc->sc_tctl |=
   11873 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11874 			else
   11875 				sc->sc_tctl |=
   11876 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11877 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11878 				sc->sc_fcrtl |= FCRTL_XONE;
   11879 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11880 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11881 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11882 			sc->sc_tbi_linkup = 1;
   11883 		} else {
   11884 			if (i == WM_LINKUP_TIMEOUT)
   11885 				wm_check_for_link(sc);
   11886 			/* Link is down. */
   11887 			DPRINTF(WM_DEBUG_LINK,
   11888 			    ("%s: LINK: set media -> link down\n",
   11889 				device_xname(sc->sc_dev)));
   11890 			sc->sc_tbi_linkup = 0;
   11891 		}
   11892 	} else {
   11893 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11894 			device_xname(sc->sc_dev)));
   11895 		sc->sc_tbi_linkup = 0;
   11896 	}
   11897 
   11898 	wm_tbi_serdes_set_linkled(sc);
   11899 
   11900 	return 0;
   11901 }
   11902 
   11903 /*
   11904  * wm_tbi_mediastatus:	[ifmedia interface function]
   11905  *
   11906  *	Get the current interface media status on a 1000BASE-X device.
   11907  */
   11908 static void
   11909 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11910 {
   11911 	struct wm_softc *sc = ifp->if_softc;
   11912 	uint32_t ctrl, status;
   11913 
   11914 	ifmr->ifm_status = IFM_AVALID;
   11915 	ifmr->ifm_active = IFM_ETHER;
   11916 
   11917 	status = CSR_READ(sc, WMREG_STATUS);
   11918 	if ((status & STATUS_LU) == 0) {
   11919 		ifmr->ifm_active |= IFM_NONE;
   11920 		return;
   11921 	}
   11922 
   11923 	ifmr->ifm_status |= IFM_ACTIVE;
   11924 	/* Only 82545 is LX */
   11925 	if (sc->sc_type == WM_T_82545)
   11926 		ifmr->ifm_active |= IFM_1000_LX;
   11927 	else
   11928 		ifmr->ifm_active |= IFM_1000_SX;
   11929 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11930 		ifmr->ifm_active |= IFM_FDX;
   11931 	else
   11932 		ifmr->ifm_active |= IFM_HDX;
   11933 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11934 	if (ctrl & CTRL_RFCE)
   11935 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11936 	if (ctrl & CTRL_TFCE)
   11937 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11938 }
   11939 
   11940 /* XXX TBI only */
   11941 static int
   11942 wm_check_for_link(struct wm_softc *sc)
   11943 {
   11944 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11945 	uint32_t rxcw;
   11946 	uint32_t ctrl;
   11947 	uint32_t status;
   11948 	bool signal;
   11949 
   11950 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11951 		device_xname(sc->sc_dev), __func__));
   11952 
   11953 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11954 		/* XXX need some work for >= 82571 */
   11955 		if (sc->sc_type >= WM_T_82571) {
   11956 			sc->sc_tbi_linkup = 1;
   11957 			return 0;
   11958 		}
   11959 	}
   11960 
   11961 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11962 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11963 	status = CSR_READ(sc, WMREG_STATUS);
   11964 	signal = wm_tbi_havesignal(sc, ctrl);
   11965 
   11966 	DPRINTF(WM_DEBUG_LINK,
   11967 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11968 		device_xname(sc->sc_dev), __func__, signal,
   11969 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11970 
   11971 	/*
   11972 	 * SWDPIN   LU RXCW
   11973 	 *	0    0	  0
   11974 	 *	0    0	  1	(should not happen)
   11975 	 *	0    1	  0	(should not happen)
   11976 	 *	0    1	  1	(should not happen)
   11977 	 *	1    0	  0	Disable autonego and force linkup
   11978 	 *	1    0	  1	got /C/ but not linkup yet
   11979 	 *	1    1	  0	(linkup)
   11980 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11981 	 *
   11982 	 */
   11983 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11984 		DPRINTF(WM_DEBUG_LINK,
   11985 		    ("%s: %s: force linkup and fullduplex\n",
   11986 			device_xname(sc->sc_dev), __func__));
   11987 		sc->sc_tbi_linkup = 0;
   11988 		/* Disable auto-negotiation in the TXCW register */
   11989 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11990 
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so re-read CTRL into sc->sc_ctrl.
		 */
   11997 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11998 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11999 	} else if (((status & STATUS_LU) != 0)
   12000 	    && ((rxcw & RXCW_C) != 0)
   12001 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12002 		sc->sc_tbi_linkup = 1;
   12003 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12004 			device_xname(sc->sc_dev),
   12005 			__func__));
   12006 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12007 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12008 	} else if (signal && ((rxcw & RXCW_C) != 0))
		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
			device_xname(sc->sc_dev), __func__));
   12011 	else
   12012 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12013 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12014 			status));
   12015 
   12016 	return 0;
   12017 }
   12018 
   12019 /*
   12020  * wm_tbi_tick:
   12021  *
   12022  *	Check the link on TBI devices.
   12023  *	This function acts as mii_tick().
   12024  */
   12025 static void
   12026 wm_tbi_tick(struct wm_softc *sc)
   12027 {
   12028 	struct mii_data *mii = &sc->sc_mii;
   12029 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12030 	uint32_t status;
   12031 
   12032 	KASSERT(WM_CORE_LOCKED(sc));
   12033 
   12034 	status = CSR_READ(sc, WMREG_STATUS);
   12035 
   12036 	/* XXX is this needed? */
   12037 	(void)CSR_READ(sc, WMREG_RXCW);
   12038 	(void)CSR_READ(sc, WMREG_CTRL);
   12039 
   12040 	/* set link status */
   12041 	if ((status & STATUS_LU) == 0) {
   12042 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12043 			device_xname(sc->sc_dev)));
   12044 		sc->sc_tbi_linkup = 0;
   12045 	} else if (sc->sc_tbi_linkup == 0) {
   12046 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12047 			device_xname(sc->sc_dev),
   12048 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12049 		sc->sc_tbi_linkup = 1;
   12050 		sc->sc_tbi_serdes_ticks = 0;
   12051 	}
   12052 
   12053 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12054 		goto setled;
   12055 
   12056 	if ((status & STATUS_LU) == 0) {
   12057 		sc->sc_tbi_linkup = 0;
   12058 		/* If the timer expired, retry autonegotiation */
   12059 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12060 		    && (++sc->sc_tbi_serdes_ticks
   12061 			>= sc->sc_tbi_serdes_anegticks)) {
   12062 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12063 			sc->sc_tbi_serdes_ticks = 0;
   12064 			/*
   12065 			 * Reset the link, and let autonegotiation do
   12066 			 * its thing
   12067 			 */
   12068 			sc->sc_ctrl |= CTRL_LRST;
   12069 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12070 			CSR_WRITE_FLUSH(sc);
   12071 			delay(1000);
   12072 			sc->sc_ctrl &= ~CTRL_LRST;
   12073 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12074 			CSR_WRITE_FLUSH(sc);
   12075 			delay(1000);
   12076 			CSR_WRITE(sc, WMREG_TXCW,
   12077 			    sc->sc_txcw & ~TXCW_ANE);
   12078 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12079 		}
   12080 	}
   12081 
   12082 setled:
   12083 	wm_tbi_serdes_set_linkled(sc);
   12084 }
   12085 
   12086 /* SERDES related */
   12087 static void
   12088 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12089 {
   12090 	uint32_t reg;
   12091 
   12092 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12093 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12094 		return;
   12095 
   12096 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12097 	reg |= PCS_CFG_PCS_EN;
   12098 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12099 
   12100 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12101 	reg &= ~CTRL_EXT_SWDPIN(3);
   12102 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12103 	CSR_WRITE_FLUSH(sc);
   12104 }
   12105 
   12106 static int
   12107 wm_serdes_mediachange(struct ifnet *ifp)
   12108 {
   12109 	struct wm_softc *sc = ifp->if_softc;
   12110 	bool pcs_autoneg = true; /* XXX */
   12111 	uint32_t ctrl_ext, pcs_lctl, reg;
   12112 
   12113 	/* XXX Currently, this function is not called on 8257[12] */
   12114 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12115 	    || (sc->sc_type >= WM_T_82575))
   12116 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12117 
   12118 	wm_serdes_power_up_link_82575(sc);
   12119 
   12120 	sc->sc_ctrl |= CTRL_SLU;
   12121 
   12122 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12123 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12124 
   12125 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12126 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12127 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12128 	case CTRL_EXT_LINK_MODE_SGMII:
   12129 		pcs_autoneg = true;
   12130 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12131 		break;
   12132 	case CTRL_EXT_LINK_MODE_1000KX:
   12133 		pcs_autoneg = false;
   12134 		/* FALLTHROUGH */
   12135 	default:
   12136 		if ((sc->sc_type == WM_T_82575)
   12137 		    || (sc->sc_type == WM_T_82576)) {
   12138 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12139 				pcs_autoneg = false;
   12140 		}
   12141 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12142 		    | CTRL_FRCFDX;
   12143 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12144 	}
   12145 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12146 
   12147 	if (pcs_autoneg) {
   12148 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12149 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12150 
   12151 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12152 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12153 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12154 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12155 	} else
   12156 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12157 
   12158 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12159 
   12161 	return 0;
   12162 }
   12163 
   12164 static void
   12165 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12166 {
   12167 	struct wm_softc *sc = ifp->if_softc;
   12168 	struct mii_data *mii = &sc->sc_mii;
   12169 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12170 	uint32_t pcs_adv, pcs_lpab, reg;
   12171 
   12172 	ifmr->ifm_status = IFM_AVALID;
   12173 	ifmr->ifm_active = IFM_ETHER;
   12174 
   12175 	/* Check PCS */
   12176 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12177 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12178 		ifmr->ifm_active |= IFM_NONE;
   12179 		sc->sc_tbi_linkup = 0;
   12180 		goto setled;
   12181 	}
   12182 
   12183 	sc->sc_tbi_linkup = 1;
   12184 	ifmr->ifm_status |= IFM_ACTIVE;
   12185 	if (sc->sc_type == WM_T_I354) {
   12186 		uint32_t status;
   12187 
   12188 		status = CSR_READ(sc, WMREG_STATUS);
   12189 		if (((status & STATUS_2P5_SKU) != 0)
   12190 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12191 			ifmr->ifm_active |= IFM_2500_KX;
   12192 		} else
   12193 			ifmr->ifm_active |= IFM_1000_KX;
   12194 	} else {
   12195 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12196 		case PCS_LSTS_SPEED_10:
   12197 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12198 			break;
   12199 		case PCS_LSTS_SPEED_100:
   12200 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12201 			break;
   12202 		case PCS_LSTS_SPEED_1000:
   12203 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12204 			break;
   12205 		default:
   12206 			device_printf(sc->sc_dev, "Unknown speed\n");
   12207 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12208 			break;
   12209 		}
   12210 	}
   12211 	if ((reg & PCS_LSTS_FDX) != 0)
   12212 		ifmr->ifm_active |= IFM_FDX;
   12213 	else
   12214 		ifmr->ifm_active |= IFM_HDX;
   12215 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12216 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12217 		/* Check flow */
   12218 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12219 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12220 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12221 			goto setled;
   12222 		}
   12223 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12224 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12225 		DPRINTF(WM_DEBUG_LINK,
   12226 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
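		/* 802.3 Annex 28B pause resolution from sym/asym pause bits */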
   12227 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12228 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12229 			mii->mii_media_active |= IFM_FLOW
   12230 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12231 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12232 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12233 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12234 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12235 			mii->mii_media_active |= IFM_FLOW
   12236 			    | IFM_ETH_TXPAUSE;
   12237 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12238 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12239 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12240 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12241 			mii->mii_media_active |= IFM_FLOW
   12242 			    | IFM_ETH_RXPAUSE;
   12243 		}
   12244 	}
   12245 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12246 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12247 setled:
   12248 	wm_tbi_serdes_set_linkled(sc);
   12249 }
   12250 
   12251 /*
   12252  * wm_serdes_tick:
   12253  *
   12254  *	Check the link on serdes devices.
   12255  */
   12256 static void
   12257 wm_serdes_tick(struct wm_softc *sc)
   12258 {
   12259 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12260 	struct mii_data *mii = &sc->sc_mii;
   12261 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12262 	uint32_t reg;
   12263 
   12264 	KASSERT(WM_CORE_LOCKED(sc));
   12265 
   12266 	mii->mii_media_status = IFM_AVALID;
   12267 	mii->mii_media_active = IFM_ETHER;
   12268 
   12269 	/* Check PCS */
   12270 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12271 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12272 		mii->mii_media_status |= IFM_ACTIVE;
   12273 		sc->sc_tbi_linkup = 1;
   12274 		sc->sc_tbi_serdes_ticks = 0;
   12275 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12276 		if ((reg & PCS_LSTS_FDX) != 0)
   12277 			mii->mii_media_active |= IFM_FDX;
   12278 		else
   12279 			mii->mii_media_active |= IFM_HDX;
   12280 	} else {
   12281 		mii->mii_media_status |= IFM_NONE;
   12282 		sc->sc_tbi_linkup = 0;
   12283 		/* If the timer expired, retry autonegotiation */
   12284 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12285 		    && (++sc->sc_tbi_serdes_ticks
   12286 			>= sc->sc_tbi_serdes_anegticks)) {
   12287 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12288 			sc->sc_tbi_serdes_ticks = 0;
   12289 			/* XXX */
   12290 			wm_serdes_mediachange(ifp);
   12291 		}
   12292 	}
   12293 
   12294 	wm_tbi_serdes_set_linkled(sc);
   12295 }
   12296 
   12297 /* SFP related */
   12298 
   12299 static int
   12300 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12301 {
   12302 	uint32_t i2ccmd;
   12303 	int i;
   12304 
   12305 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12306 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12307 
   12308 	/* Poll the ready bit */
   12309 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12310 		delay(50);
   12311 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12312 		if (i2ccmd & I2CCMD_READY)
   12313 			break;
   12314 	}
   12315 	if ((i2ccmd & I2CCMD_READY) == 0)
   12316 		return -1;
   12317 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12318 		return -1;
   12319 
   12320 	*data = i2ccmd & 0x00ff;
   12321 
   12322 	return 0;
   12323 }
   12324 
   12325 static uint32_t
   12326 wm_sfp_get_media_type(struct wm_softc *sc)
   12327 {
   12328 	uint32_t ctrl_ext;
   12329 	uint8_t val = 0;
   12330 	int timeout = 3;
   12331 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12332 	int rv = -1;
   12333 
   12334 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12335 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12336 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12337 	CSR_WRITE_FLUSH(sc);
   12338 
   12339 	/* Read SFP module data */
   12340 	while (timeout) {
   12341 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12342 		if (rv == 0)
   12343 			break;
   12344 		delay(100*1000); /* XXX too big */
   12345 		timeout--;
   12346 	}
   12347 	if (rv != 0)
   12348 		goto out;
   12349 	switch (val) {
   12350 	case SFF_SFP_ID_SFF:
   12351 		aprint_normal_dev(sc->sc_dev,
   12352 		    "Module/Connector soldered to board\n");
   12353 		break;
   12354 	case SFF_SFP_ID_SFP:
   12355 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12356 		break;
   12357 	case SFF_SFP_ID_UNKNOWN:
   12358 		goto out;
   12359 	default:
   12360 		break;
   12361 	}
   12362 
   12363 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
	if (rv != 0)
		goto out;
   12367 
   12368 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12369 		mediatype = WM_MEDIATYPE_SERDES;
   12370 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12371 		sc->sc_flags |= WM_F_SGMII;
   12372 		mediatype = WM_MEDIATYPE_COPPER;
   12373 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12374 		sc->sc_flags |= WM_F_SGMII;
   12375 		mediatype = WM_MEDIATYPE_SERDES;
   12376 	}
   12377 
   12378 out:
   12379 	/* Restore I2C interface setting */
   12380 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12381 
   12382 	return mediatype;
   12383 }
   12384 
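/*
 * Note: SFF_SFP_ID_OFF and SFF_SFP_ETH_FLAGS_OFF above correspond to the
 * SFF-8472 identifier and Ethernet compliance-code bytes.  The compliance
 * bits read above select the media type: 1000BASE-SX/LX modules attach as
 * SERDES, 1000BASE-T as SGMII copper and 100BASE-FX as SGMII SERDES.
 */
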
   12385 /*
   12386  * NVM related.
   12387  * Microwire, SPI (w/wo EERD) and Flash.
   12388  */
   12389 
   12390 /* Both spi and uwire */
   12391 
   12392 /*
   12393  * wm_eeprom_sendbits:
   12394  *
   12395  *	Send a series of bits to the EEPROM.
   12396  */
   12397 static void
   12398 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12399 {
   12400 	uint32_t reg;
   12401 	int x;
   12402 
   12403 	reg = CSR_READ(sc, WMREG_EECD);
   12404 
   12405 	for (x = nbits; x > 0; x--) {
   12406 		if (bits & (1U << (x - 1)))
   12407 			reg |= EECD_DI;
   12408 		else
   12409 			reg &= ~EECD_DI;
   12410 		CSR_WRITE(sc, WMREG_EECD, reg);
   12411 		CSR_WRITE_FLUSH(sc);
   12412 		delay(2);
   12413 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12414 		CSR_WRITE_FLUSH(sc);
   12415 		delay(2);
   12416 		CSR_WRITE(sc, WMREG_EECD, reg);
   12417 		CSR_WRITE_FLUSH(sc);
   12418 		delay(2);
   12419 	}
   12420 }
   12421 
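/*
 * Usage sketch (illustrative): a Microwire READ is bit-banged by clocking
 * out the 3-bit opcode followed by the address, then clocking the 16 data
 * bits back in, exactly as wm_nvm_read_uwire() below does:
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
 *	wm_eeprom_sendbits(sc, word, sc->sc_nvm_addrbits);
 *	wm_eeprom_recvbits(sc, &val, 16);
 */
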
   12422 /*
   12423  * wm_eeprom_recvbits:
   12424  *
   12425  *	Receive a series of bits from the EEPROM.
   12426  */
   12427 static void
   12428 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12429 {
   12430 	uint32_t reg, val;
   12431 	int x;
   12432 
   12433 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12434 
   12435 	val = 0;
   12436 	for (x = nbits; x > 0; x--) {
   12437 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12438 		CSR_WRITE_FLUSH(sc);
   12439 		delay(2);
   12440 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12441 			val |= (1U << (x - 1));
   12442 		CSR_WRITE(sc, WMREG_EECD, reg);
   12443 		CSR_WRITE_FLUSH(sc);
   12444 		delay(2);
   12445 	}
   12446 	*valp = val;
   12447 }
   12448 
   12449 /* Microwire */
   12450 
   12451 /*
   12452  * wm_nvm_read_uwire:
   12453  *
   12454  *	Read a word from the EEPROM using the MicroWire protocol.
   12455  */
   12456 static int
   12457 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12458 {
   12459 	uint32_t reg, val;
   12460 	int i;
   12461 
   12462 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12463 		device_xname(sc->sc_dev), __func__));
   12464 
   12465 	if (sc->nvm.acquire(sc) != 0)
   12466 		return -1;
   12467 
   12468 	for (i = 0; i < wordcnt; i++) {
   12469 		/* Clear SK and DI. */
   12470 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12471 		CSR_WRITE(sc, WMREG_EECD, reg);
   12472 
   12473 		/*
   12474 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12475 		 * and Xen.
   12476 		 *
   12477 		 * We use this workaround only for 82540 because qemu's
   12478 		 * e1000 act as 82540.
   12479 		 */
   12480 		if (sc->sc_type == WM_T_82540) {
   12481 			reg |= EECD_SK;
   12482 			CSR_WRITE(sc, WMREG_EECD, reg);
   12483 			reg &= ~EECD_SK;
   12484 			CSR_WRITE(sc, WMREG_EECD, reg);
   12485 			CSR_WRITE_FLUSH(sc);
   12486 			delay(2);
   12487 		}
   12488 		/* XXX: end of workaround */
   12489 
   12490 		/* Set CHIP SELECT. */
   12491 		reg |= EECD_CS;
   12492 		CSR_WRITE(sc, WMREG_EECD, reg);
   12493 		CSR_WRITE_FLUSH(sc);
   12494 		delay(2);
   12495 
   12496 		/* Shift in the READ command. */
   12497 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12498 
   12499 		/* Shift in address. */
   12500 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12501 
   12502 		/* Shift out the data. */
   12503 		wm_eeprom_recvbits(sc, &val, 16);
   12504 		data[i] = val & 0xffff;
   12505 
   12506 		/* Clear CHIP SELECT. */
   12507 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12508 		CSR_WRITE(sc, WMREG_EECD, reg);
   12509 		CSR_WRITE_FLUSH(sc);
   12510 		delay(2);
   12511 	}
   12512 
   12513 	sc->nvm.release(sc);
   12514 	return 0;
   12515 }
   12516 
   12517 /* SPI */
   12518 
   12519 /*
   12520  * Set SPI and FLASH related information from the EECD register.
   12521  * For 82541 and 82547, the word size is taken from EEPROM.
   12522  */
   12523 static int
   12524 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12525 {
   12526 	int size;
   12527 	uint32_t reg;
   12528 	uint16_t data;
   12529 
   12530 	reg = CSR_READ(sc, WMREG_EECD);
   12531 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12532 
   12533 	/* Read the size of NVM from EECD by default */
   12534 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12535 	switch (sc->sc_type) {
   12536 	case WM_T_82541:
   12537 	case WM_T_82541_2:
   12538 	case WM_T_82547:
   12539 	case WM_T_82547_2:
   12540 		/* Set dummy value to access EEPROM */
   12541 		sc->sc_nvm_wordsize = 64;
   12542 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12543 			aprint_error_dev(sc->sc_dev,
   12544 			    "%s: failed to read EEPROM size\n", __func__);
   12545 		}
   12546 		reg = data;
   12547 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12548 		if (size == 0)
   12549 			size = 6; /* 64 word size */
   12550 		else
   12551 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12552 		break;
   12553 	case WM_T_80003:
   12554 	case WM_T_82571:
   12555 	case WM_T_82572:
   12556 	case WM_T_82573: /* SPI case */
   12557 	case WM_T_82574: /* SPI case */
   12558 	case WM_T_82583: /* SPI case */
   12559 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12560 		if (size > 14)
   12561 			size = 14;
   12562 		break;
   12563 	case WM_T_82575:
   12564 	case WM_T_82576:
   12565 	case WM_T_82580:
   12566 	case WM_T_I350:
   12567 	case WM_T_I354:
   12568 	case WM_T_I210:
   12569 	case WM_T_I211:
   12570 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12571 		if (size > 15)
   12572 			size = 15;
   12573 		break;
   12574 	default:
   12575 		aprint_error_dev(sc->sc_dev,
   12576 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12577 		return -1;
   12579 	}
   12580 
   12581 	sc->sc_nvm_wordsize = 1 << size;
   12582 
   12583 	return 0;
   12584 }
   12585 
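/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in the Intel
 * reference code): an 82571 whose EECD_EE_SIZE_EX field reads 2 yields
 * size = 2 + 6 = 8, i.e. sc_nvm_wordsize = 1 << 8 = 256 words (512 bytes).
 */
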
   12586 /*
   12587  * wm_nvm_ready_spi:
   12588  *
   12589  *	Wait for a SPI EEPROM to be ready for commands.
   12590  */
   12591 static int
   12592 wm_nvm_ready_spi(struct wm_softc *sc)
   12593 {
   12594 	uint32_t val;
   12595 	int usec;
   12596 
   12597 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12598 		device_xname(sc->sc_dev), __func__));
   12599 
   12600 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12601 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12602 		wm_eeprom_recvbits(sc, &val, 8);
   12603 		if ((val & SPI_SR_RDY) == 0)
   12604 			break;
   12605 	}
   12606 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   12608 		return -1;
   12609 	}
   12610 	return 0;
   12611 }
   12612 
   12613 /*
   12614  * wm_nvm_read_spi:
   12615  *
 *	Read a word from the EEPROM using the SPI protocol.
   12617  */
   12618 static int
   12619 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12620 {
   12621 	uint32_t reg, val;
   12622 	int i;
   12623 	uint8_t opc;
   12624 	int rv = 0;
   12625 
   12626 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12627 		device_xname(sc->sc_dev), __func__));
   12628 
   12629 	if (sc->nvm.acquire(sc) != 0)
   12630 		return -1;
   12631 
   12632 	/* Clear SK and CS. */
   12633 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12634 	CSR_WRITE(sc, WMREG_EECD, reg);
   12635 	CSR_WRITE_FLUSH(sc);
   12636 	delay(2);
   12637 
   12638 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12639 		goto out;
   12640 
   12641 	/* Toggle CS to flush commands. */
   12642 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12643 	CSR_WRITE_FLUSH(sc);
   12644 	delay(2);
   12645 	CSR_WRITE(sc, WMREG_EECD, reg);
   12646 	CSR_WRITE_FLUSH(sc);
   12647 	delay(2);
   12648 
   12649 	opc = SPI_OPC_READ;
   12650 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12651 		opc |= SPI_OPC_A8;
   12652 
   12653 	wm_eeprom_sendbits(sc, opc, 8);
   12654 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12655 
   12656 	for (i = 0; i < wordcnt; i++) {
   12657 		wm_eeprom_recvbits(sc, &val, 16);
   12658 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12659 	}
   12660 
   12661 	/* Raise CS and clear SK. */
   12662 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12663 	CSR_WRITE(sc, WMREG_EECD, reg);
   12664 	CSR_WRITE_FLUSH(sc);
   12665 	delay(2);
   12666 
   12667 out:
   12668 	sc->nvm.release(sc);
   12669 	return rv;
   12670 }
   12671 
   12672 /* Using with EERD */
   12673 
   12674 static int
   12675 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12676 {
   12677 	uint32_t attempts = 100000;
   12678 	uint32_t i, reg = 0;
   12679 	int32_t done = -1;
   12680 
   12681 	for (i = 0; i < attempts; i++) {
   12682 		reg = CSR_READ(sc, rw);
   12683 
   12684 		if (reg & EERD_DONE) {
   12685 			done = 0;
   12686 			break;
   12687 		}
   12688 		delay(5);
   12689 	}
   12690 
   12691 	return done;
   12692 }
   12693 
   12694 static int
   12695 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12696 {
   12697 	int i, eerd = 0;
   12698 	int rv = 0;
   12699 
   12700 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12701 		device_xname(sc->sc_dev), __func__));
   12702 
   12703 	if (sc->nvm.acquire(sc) != 0)
   12704 		return -1;
   12705 
   12706 	for (i = 0; i < wordcnt; i++) {
   12707 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12708 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12709 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12710 		if (rv != 0) {
   12711 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12713 			break;
   12714 		}
   12715 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12716 	}
   12717 
   12718 	sc->nvm.release(sc);
   12719 	return rv;
   12720 }
   12721 
   12722 /* Flash */
   12723 
   12724 static int
   12725 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12726 {
   12727 	uint32_t eecd;
   12728 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12729 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12730 	uint32_t nvm_dword = 0;
   12731 	uint8_t sig_byte = 0;
   12732 	int rv;
   12733 
   12734 	switch (sc->sc_type) {
   12735 	case WM_T_PCH_SPT:
   12736 	case WM_T_PCH_CNP:
   12737 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12738 		act_offset = ICH_NVM_SIG_WORD * 2;
   12739 
   12740 		/* set bank to 0 in case flash read fails. */
   12741 		*bank = 0;
   12742 
   12743 		/* Check bank 0 */
   12744 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12745 		if (rv != 0)
   12746 			return rv;
   12747 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12748 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12749 			*bank = 0;
   12750 			return 0;
   12751 		}
   12752 
   12753 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12757 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12758 			*bank = 1;
   12759 			return 0;
   12760 		}
   12761 		aprint_error_dev(sc->sc_dev,
   12762 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12763 		return -1;
   12764 	case WM_T_ICH8:
   12765 	case WM_T_ICH9:
   12766 		eecd = CSR_READ(sc, WMREG_EECD);
   12767 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12768 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12769 			return 0;
   12770 		}
   12771 		/* FALLTHROUGH */
   12772 	default:
   12773 		/* Default to 0 */
   12774 		*bank = 0;
   12775 
   12776 		/* Check bank 0 */
   12777 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12778 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12779 			*bank = 0;
   12780 			return 0;
   12781 		}
   12782 
   12783 		/* Check bank 1 */
   12784 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12785 		    &sig_byte);
   12786 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12787 			*bank = 1;
   12788 			return 0;
   12789 		}
   12790 	}
   12791 
   12792 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12793 		device_xname(sc->sc_dev)));
   12794 	return -1;
   12795 }
   12796 
   12797 /******************************************************************************
   12798  * This function does initial flash setup so that a new read/write/erase cycle
   12799  * can be started.
   12800  *
   12801  * sc - The pointer to the hw structure
   12802  ****************************************************************************/
   12803 static int32_t
   12804 wm_ich8_cycle_init(struct wm_softc *sc)
   12805 {
   12806 	uint16_t hsfsts;
   12807 	int32_t error = 1;
   12808 	int32_t i     = 0;
   12809 
   12810 	if (sc->sc_type >= WM_T_PCH_SPT)
   12811 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12812 	else
   12813 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12814 
	/* Maybe we should check the Flash Descriptor Valid bit in HW status */
   12816 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12817 		return error;
   12818 
	/* Clear FCERR and DAEL in HW status by writing 1s */
   12821 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12822 
   12823 	if (sc->sc_type >= WM_T_PCH_SPT)
   12824 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12825 	else
   12826 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12827 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to check
	 * against in order to start a new cycle, or the FDONE bit should be
	 * changed in the hardware so that it is 1 after hardware reset, which
	 * could then be used to indicate whether a cycle is in progress or
	 * has been completed.  We should also have some software semaphore
	 * mechanism to guard FDONE or the cycle-in-progress bit so that
	 * accesses to those bits by two threads are serialized, or some way
	 * to keep two threads from starting a cycle at the same time.
	 */
   12838 
   12839 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12840 		/*
   12841 		 * There is no cycle running at present, so we can start a
   12842 		 * cycle
   12843 		 */
   12844 
   12845 		/* Begin by setting Flash Cycle Done. */
   12846 		hsfsts |= HSFSTS_DONE;
   12847 		if (sc->sc_type >= WM_T_PCH_SPT)
   12848 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12849 			    hsfsts & 0xffffUL);
   12850 		else
   12851 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12852 		error = 0;
   12853 	} else {
   12854 		/*
		 * Otherwise, poll for some time so the current cycle has a
   12856 		 * chance to end before giving up.
   12857 		 */
   12858 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12859 			if (sc->sc_type >= WM_T_PCH_SPT)
   12860 				hsfsts = ICH8_FLASH_READ32(sc,
   12861 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12862 			else
   12863 				hsfsts = ICH8_FLASH_READ16(sc,
   12864 				    ICH_FLASH_HSFSTS);
   12865 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12866 				error = 0;
   12867 				break;
   12868 			}
   12869 			delay(1);
   12870 		}
   12871 		if (error == 0) {
			/*
			 * The previous cycle finished before the timeout, so
			 * now set the Flash Cycle Done bit.
			 */
   12876 			hsfsts |= HSFSTS_DONE;
   12877 			if (sc->sc_type >= WM_T_PCH_SPT)
   12878 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12879 				    hsfsts & 0xffffUL);
   12880 			else
   12881 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12882 				    hsfsts);
   12883 		}
   12884 	}
   12885 	return error;
   12886 }
   12887 
   12888 /******************************************************************************
   12889  * This function starts a flash cycle and waits for its completion
   12890  *
   12891  * sc - The pointer to the hw structure
   12892  ****************************************************************************/
   12893 static int32_t
   12894 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12895 {
   12896 	uint16_t hsflctl;
   12897 	uint16_t hsfsts;
   12898 	int32_t error = 1;
   12899 	uint32_t i = 0;
   12900 
   12901 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12902 	if (sc->sc_type >= WM_T_PCH_SPT)
   12903 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12904 	else
   12905 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12906 	hsflctl |= HSFCTL_GO;
   12907 	if (sc->sc_type >= WM_T_PCH_SPT)
   12908 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12909 		    (uint32_t)hsflctl << 16);
   12910 	else
   12911 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12912 
   12913 	/* Wait till FDONE bit is set to 1 */
   12914 	do {
   12915 		if (sc->sc_type >= WM_T_PCH_SPT)
   12916 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12917 			    & 0xffffUL;
   12918 		else
   12919 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12920 		if (hsfsts & HSFSTS_DONE)
   12921 			break;
   12922 		delay(1);
   12923 		i++;
   12924 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12926 		error = 0;
   12927 
   12928 	return error;
   12929 }
   12930 
   12931 /******************************************************************************
   12932  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12933  *
   12934  * sc - The pointer to the hw structure
   12935  * index - The index of the byte or word to read.
 * size - Size of data to read: 1=byte, 2=word, 4=dword
   12937  * data - Pointer to the word to store the value read.
   12938  *****************************************************************************/
   12939 static int32_t
   12940 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12941     uint32_t size, uint32_t *data)
   12942 {
   12943 	uint16_t hsfsts;
   12944 	uint16_t hsflctl;
   12945 	uint32_t flash_linear_address;
   12946 	uint32_t flash_data = 0;
   12947 	int32_t error = 1;
   12948 	int32_t count = 0;
   12949 
	if (size < 1 || size > 4 || data == NULL ||
   12951 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12952 		return error;
   12953 
   12954 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12955 	    sc->sc_ich8_flash_base;
   12956 
   12957 	do {
   12958 		delay(1);
   12959 		/* Steps */
   12960 		error = wm_ich8_cycle_init(sc);
   12961 		if (error)
   12962 			break;
   12963 
   12964 		if (sc->sc_type >= WM_T_PCH_SPT)
   12965 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12966 			    >> 16;
   12967 		else
   12968 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The BCOUNT field holds the transfer size minus one. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12971 		    & HSFCTL_BCOUNT_MASK;
   12972 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12973 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12974 			/*
			 * In SPT, this register is in the LAN memory space,
			 * not flash, so only 32-bit access is supported.
   12977 			 */
   12978 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12979 			    (uint32_t)hsflctl << 16);
   12980 		} else
   12981 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12982 
   12983 		/*
		 * Write the last 24 bits of the index into the Flash Linear
		 * Address field of the Flash Address register.
		 */
		/* TODO: maybe check the index against the size of the flash */
   12988 
   12989 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12990 
   12991 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12992 
   12993 		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the value from the Flash
		 * Data0 register (least significant byte first).
   12998 		 */
   12999 		if (error == 0) {
   13000 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13001 			if (size == 1)
   13002 				*data = (uint8_t)(flash_data & 0x000000FF);
   13003 			else if (size == 2)
   13004 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13005 			else if (size == 4)
   13006 				*data = (uint32_t)flash_data;
   13007 			break;
   13008 		} else {
   13009 			/*
   13010 			 * If we've gotten here, then things are probably
   13011 			 * completely hosed, but if the error condition is
   13012 			 * detected, it won't hurt to give it another try...
   13013 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13014 			 */
   13015 			if (sc->sc_type >= WM_T_PCH_SPT)
   13016 				hsfsts = ICH8_FLASH_READ32(sc,
   13017 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13018 			else
   13019 				hsfsts = ICH8_FLASH_READ16(sc,
   13020 				    ICH_FLASH_HSFSTS);
   13021 
   13022 			if (hsfsts & HSFSTS_ERR) {
   13023 				/* Repeat for some time before giving up. */
   13024 				continue;
   13025 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13026 				break;
   13027 		}
   13028 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13029 
   13030 	return error;
   13031 }
   13032 
   13033 /******************************************************************************
   13034  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13035  *
   13036  * sc - pointer to wm_hw structure
   13037  * index - The index of the byte to read.
   13038  * data - Pointer to a byte to store the value read.
   13039  *****************************************************************************/
   13040 static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
   13042 {
   13043 	int32_t status;
   13044 	uint32_t word = 0;
   13045 
   13046 	status = wm_read_ich8_data(sc, index, 1, &word);
   13047 	if (status == 0)
   13048 		*data = (uint8_t)word;
   13049 	else
   13050 		*data = 0;
   13051 
   13052 	return status;
   13053 }
   13054 
   13055 /******************************************************************************
   13056  * Reads a word from the NVM using the ICH8 flash access registers.
   13057  *
   13058  * sc - pointer to wm_hw structure
   13059  * index - The starting byte index of the word to read.
   13060  * data - Pointer to a word to store the value read.
   13061  *****************************************************************************/
   13062 static int32_t
   13063 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13064 {
   13065 	int32_t status;
   13066 	uint32_t word = 0;
   13067 
   13068 	status = wm_read_ich8_data(sc, index, 2, &word);
   13069 	if (status == 0)
   13070 		*data = (uint16_t)word;
   13071 	else
   13072 		*data = 0;
   13073 
   13074 	return status;
   13075 }
   13076 
   13077 /******************************************************************************
   13078  * Reads a dword from the NVM using the ICH8 flash access registers.
   13079  *
   13080  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   13083  *****************************************************************************/
   13084 static int32_t
   13085 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13086 {
   13087 	int32_t status;
   13088 
   13089 	status = wm_read_ich8_data(sc, index, 4, data);
   13090 	return status;
   13091 }
   13092 
   13093 /******************************************************************************
   13094  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13095  * register.
   13096  *
   13097  * sc - Struct containing variables accessed by shared code
   13098  * offset - offset of word in the EEPROM to read
   13099  * data - word read from the EEPROM
   13100  * words - number of words to read
   13101  *****************************************************************************/
   13102 static int
   13103 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13104 {
   13105 	int32_t	 rv = 0;
   13106 	uint32_t flash_bank = 0;
   13107 	uint32_t act_offset = 0;
   13108 	uint32_t bank_offset = 0;
   13109 	uint16_t word = 0;
   13110 	uint16_t i = 0;
   13111 
   13112 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13113 		device_xname(sc->sc_dev), __func__));
   13114 
   13115 	if (sc->nvm.acquire(sc) != 0)
   13116 		return -1;
   13117 
   13118 	/*
   13119 	 * We need to know which is the valid flash bank.  In the event
   13120 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13121 	 * managing flash_bank. So it cannot be trusted and needs
   13122 	 * to be updated with each read.
   13123 	 */
   13124 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13125 	if (rv) {
   13126 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13127 			device_xname(sc->sc_dev)));
   13128 		flash_bank = 0;
   13129 	}
   13130 
   13131 	/*
	 * Adjust the offset if we're on bank 1; the bank size is counted in
	 * words, hence the * 2 to convert to a byte offset.
   13134 	 */
   13135 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13136 
   13137 	for (i = 0; i < words; i++) {
   13138 		/* The NVM part needs a byte offset, hence * 2 */
   13139 		act_offset = bank_offset + ((offset + i) * 2);
   13140 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13141 		if (rv) {
   13142 			aprint_error_dev(sc->sc_dev,
   13143 			    "%s: failed to read NVM\n", __func__);
   13144 			break;
   13145 		}
   13146 		data[i] = word;
   13147 	}
   13148 
   13149 	sc->nvm.release(sc);
   13150 	return rv;
   13151 }
   13152 
   13153 /******************************************************************************
   13154  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13155  * register.
   13156  *
   13157  * sc - Struct containing variables accessed by shared code
   13158  * offset - offset of word in the EEPROM to read
   13159  * data - word read from the EEPROM
   13160  * words - number of words to read
   13161  *****************************************************************************/
   13162 static int
   13163 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13164 {
   13165 	int32_t	 rv = 0;
   13166 	uint32_t flash_bank = 0;
   13167 	uint32_t act_offset = 0;
   13168 	uint32_t bank_offset = 0;
   13169 	uint32_t dword = 0;
   13170 	uint16_t i = 0;
   13171 
   13172 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13173 		device_xname(sc->sc_dev), __func__));
   13174 
   13175 	if (sc->nvm.acquire(sc) != 0)
   13176 		return -1;
   13177 
   13178 	/*
   13179 	 * We need to know which is the valid flash bank.  In the event
   13180 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13181 	 * managing flash_bank. So it cannot be trusted and needs
   13182 	 * to be updated with each read.
   13183 	 */
   13184 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13185 	if (rv) {
   13186 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13187 			device_xname(sc->sc_dev)));
   13188 		flash_bank = 0;
   13189 	}
   13190 
   13191 	/*
	 * Adjust the offset if we're on bank 1; the bank size is counted in
	 * words, hence the * 2 to convert to a byte offset.
   13194 	 */
   13195 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13196 
   13197 	for (i = 0; i < words; i++) {
   13198 		/* The NVM part needs a byte offset, hence * 2 */
   13199 		act_offset = bank_offset + ((offset + i) * 2);
   13200 		/* but we must read dword aligned, so mask ... */
   13201 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13202 		if (rv) {
   13203 			aprint_error_dev(sc->sc_dev,
   13204 			    "%s: failed to read NVM\n", __func__);
   13205 			break;
   13206 		}
   13207 		/* ... and pick out low or high word */
   13208 		if ((act_offset & 0x2) == 0)
   13209 			data[i] = (uint16_t)(dword & 0xFFFF);
   13210 		else
   13211 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13212 	}
   13213 
   13214 	sc->nvm.release(sc);
   13215 	return rv;
   13216 }
   13217 
   13218 /* iNVM */
   13219 
   13220 static int
   13221 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13222 {
	int32_t	 rv = -1;
   13224 	uint32_t invm_dword;
   13225 	uint16_t i;
   13226 	uint8_t record_type, word_address;
   13227 
   13228 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13229 		device_xname(sc->sc_dev), __func__));
   13230 
   13231 	for (i = 0; i < INVM_SIZE; i++) {
   13232 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13233 		/* Get record type */
   13234 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13235 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13236 			break;
   13237 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13238 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13239 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13240 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13241 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13242 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13243 			if (word_address == address) {
   13244 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13245 				rv = 0;
   13246 				break;
   13247 			}
   13248 		}
   13249 	}
   13250 
   13251 	return rv;
   13252 }
   13253 
   13254 static int
   13255 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13256 {
   13257 	int rv = 0;
   13258 	int i;
   13259 
   13260 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13261 		device_xname(sc->sc_dev), __func__));
   13262 
   13263 	if (sc->nvm.acquire(sc) != 0)
   13264 		return -1;
   13265 
   13266 	for (i = 0; i < words; i++) {
   13267 		switch (offset + i) {
   13268 		case NVM_OFF_MACADDR:
   13269 		case NVM_OFF_MACADDR1:
   13270 		case NVM_OFF_MACADDR2:
   13271 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13272 			if (rv != 0) {
   13273 				data[i] = 0xffff;
   13274 				rv = -1;
   13275 			}
   13276 			break;
   13277 		case NVM_OFF_CFG2:
   13278 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13279 			if (rv != 0) {
   13280 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13281 				rv = 0;
   13282 			}
   13283 			break;
   13284 		case NVM_OFF_CFG4:
   13285 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13286 			if (rv != 0) {
   13287 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13288 				rv = 0;
   13289 			}
   13290 			break;
   13291 		case NVM_OFF_LED_1_CFG:
   13292 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13293 			if (rv != 0) {
   13294 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13295 				rv = 0;
   13296 			}
   13297 			break;
   13298 		case NVM_OFF_LED_0_2_CFG:
   13299 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13300 			if (rv != 0) {
   13301 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13302 				rv = 0;
   13303 			}
   13304 			break;
   13305 		case NVM_OFF_ID_LED_SETTINGS:
   13306 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13307 			if (rv != 0) {
   13308 				*data = ID_LED_RESERVED_FFFF;
   13309 				rv = 0;
   13310 			}
   13311 			break;
   13312 		default:
   13313 			DPRINTF(WM_DEBUG_NVM,
   13314 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13315 			*data = NVM_RESERVED_WORD;
   13316 			break;
   13317 		}
   13318 	}
   13319 
   13320 	sc->nvm.release(sc);
   13321 	return rv;
   13322 }
   13323 
   13324 /* Lock, detecting NVM type, validate checksum, version and read */
   13325 
   13326 static int
   13327 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13328 {
   13329 	uint32_t eecd = 0;
   13330 
   13331 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13332 	    || sc->sc_type == WM_T_82583) {
   13333 		eecd = CSR_READ(sc, WMREG_EECD);
   13334 
   13335 		/* Isolate bits 15 & 16 */
   13336 		eecd = ((eecd >> 15) & 0x03);
   13337 
   13338 		/* If both bits are set, device is Flash type */
   13339 		if (eecd == 0x03)
   13340 			return 0;
   13341 	}
   13342 	return 1;
   13343 }
   13344 
   13345 static int
   13346 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13347 {
   13348 	uint32_t eec;
   13349 
   13350 	eec = CSR_READ(sc, WMREG_EEC);
   13351 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13352 		return 1;
   13353 
   13354 	return 0;
   13355 }
   13356 
   13357 /*
   13358  * wm_nvm_validate_checksum
   13359  *
   13360  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13361  */
   13362 static int
   13363 wm_nvm_validate_checksum(struct wm_softc *sc)
   13364 {
   13365 	uint16_t checksum;
   13366 	uint16_t eeprom_data;
   13367 #ifdef WM_DEBUG
   13368 	uint16_t csum_wordaddr, valid_checksum;
   13369 #endif
   13370 	int i;
   13371 
   13372 	checksum = 0;
   13373 
   13374 	/* Don't check for I211 */
   13375 	if (sc->sc_type == WM_T_I211)
   13376 		return 0;
   13377 
   13378 #ifdef WM_DEBUG
   13379 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13380 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13381 		csum_wordaddr = NVM_OFF_COMPAT;
   13382 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13383 	} else {
   13384 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13385 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13386 	}
   13387 
   13388 	/* Dump EEPROM image for debug */
   13389 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13390 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13391 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13392 		/* XXX PCH_SPT? */
   13393 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13394 		if ((eeprom_data & valid_checksum) == 0)
   13395 			DPRINTF(WM_DEBUG_NVM,
   13396 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13397 				device_xname(sc->sc_dev), eeprom_data,
   13398 				    valid_checksum));
   13399 	}
   13400 
   13401 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13402 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13403 		for (i = 0; i < NVM_SIZE; i++) {
   13404 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13405 				printf("XXXX ");
   13406 			else
   13407 				printf("%04hx ", eeprom_data);
   13408 			if (i % 8 == 7)
   13409 				printf("\n");
   13410 		}
   13411 	}
   13412 
   13413 #endif /* WM_DEBUG */
   13414 
   13415 	for (i = 0; i < NVM_SIZE; i++) {
   13416 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13417 			return 1;
   13418 		checksum += eeprom_data;
   13419 	}
   13420 
   13421 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13422 #ifdef WM_DEBUG
   13423 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13424 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13425 #endif
   13426 	}
   13427 
   13428 	return 0;
   13429 }
   13430 
   13431 static void
   13432 wm_nvm_version_invm(struct wm_softc *sc)
   13433 {
   13434 	uint32_t dword;
   13435 
   13436 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes. Perhaps it's not perfect though...
   13440 	 *
   13441 	 * Example:
   13442 	 *
   13443 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13444 	 */
   13445 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13446 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13447 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13448 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13449 }
   13450 
   13451 static void
   13452 wm_nvm_version(struct wm_softc *sc)
   13453 {
   13454 	uint16_t major, minor, build, patch;
   13455 	uint16_t uid0, uid1;
   13456 	uint16_t nvm_data;
   13457 	uint16_t off;
   13458 	bool check_version = false;
   13459 	bool check_optionrom = false;
   13460 	bool have_build = false;
   13461 	bool have_uid = true;
   13462 
   13463 	/*
   13464 	 * Version format:
   13465 	 *
   13466 	 * XYYZ
   13467 	 * X0YZ
   13468 	 * X0YY
   13469 	 *
   13470 	 * Example:
   13471 	 *
   13472 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13473 	 *	82571	0x50a6	5.10.6?
   13474 	 *	82572	0x506a	5.6.10?
   13475 	 *	82572EI	0x5069	5.6.9?
   13476 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13477 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   13479 	 */
   13480 
   13481 	/*
   13482 	 * XXX
	 * QEMU's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13485 	 */
   13486 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13487 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13488 		have_uid = false;
   13489 
   13490 	switch (sc->sc_type) {
   13491 	case WM_T_82571:
   13492 	case WM_T_82572:
   13493 	case WM_T_82574:
   13494 	case WM_T_82583:
   13495 		check_version = true;
   13496 		check_optionrom = true;
   13497 		have_build = true;
   13498 		break;
   13499 	case WM_T_82575:
   13500 	case WM_T_82576:
   13501 	case WM_T_82580:
   13502 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13503 			check_version = true;
   13504 		break;
   13505 	case WM_T_I211:
   13506 		wm_nvm_version_invm(sc);
   13507 		have_uid = false;
   13508 		goto printver;
   13509 	case WM_T_I210:
   13510 		if (!wm_nvm_flash_presence_i210(sc)) {
   13511 			wm_nvm_version_invm(sc);
   13512 			have_uid = false;
   13513 			goto printver;
   13514 		}
   13515 		/* FALLTHROUGH */
   13516 	case WM_T_I350:
   13517 	case WM_T_I354:
   13518 		check_version = true;
   13519 		check_optionrom = true;
   13520 		break;
   13521 	default:
   13522 		return;
   13523 	}
   13524 	if (check_version
   13525 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13526 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13527 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13528 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13529 			build = nvm_data & NVM_BUILD_MASK;
   13530 			have_build = true;
   13531 		} else
   13532 			minor = nvm_data & 0x00ff;
   13533 
   13534 		/* Decimal */
   13535 		minor = (minor / 16) * 10 + (minor % 16);
   13536 		sc->sc_nvm_ver_major = major;
   13537 		sc->sc_nvm_ver_minor = minor;
   13538 
   13539 printver:
   13540 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13541 		    sc->sc_nvm_ver_minor);
   13542 		if (have_build) {
   13543 			sc->sc_nvm_ver_build = build;
   13544 			aprint_verbose(".%d", build);
   13545 		}
   13546 	}
   13547 
	/* Assume the Option ROM area is above NVM_SIZE */
   13549 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13550 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13551 		/* Option ROM Version */
   13552 		if ((off != 0x0000) && (off != 0xffff)) {
   13553 			int rv;
   13554 
   13555 			off += NVM_COMBO_VER_OFF;
   13556 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13557 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13558 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13559 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13560 				/* 16bits */
   13561 				major = uid0 >> 8;
   13562 				build = (uid0 << 8) | (uid1 >> 8);
   13563 				patch = uid1 & 0x00ff;
   13564 				aprint_verbose(", option ROM Version %d.%d.%d",
   13565 				    major, build, patch);
   13566 			}
   13567 		}
   13568 	}
   13569 
   13570 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13571 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13572 }
   13573 
   13574 /*
   13575  * wm_nvm_read:
   13576  *
   13577  *	Read data from the serial EEPROM.
   13578  */
   13579 static int
   13580 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13581 {
   13582 	int rv;
   13583 
   13584 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13585 		device_xname(sc->sc_dev), __func__));
   13586 
   13587 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13588 		return -1;
   13589 
   13590 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13591 
   13592 	return rv;
   13593 }
   13594 
   13595 /*
   13596  * Hardware semaphores.
 * Very complex...
   13598  */
   13599 
   13600 static int
   13601 wm_get_null(struct wm_softc *sc)
   13602 {
   13603 
   13604 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13605 		device_xname(sc->sc_dev), __func__));
   13606 	return 0;
   13607 }
   13608 
   13609 static void
   13610 wm_put_null(struct wm_softc *sc)
   13611 {
   13612 
   13613 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13614 		device_xname(sc->sc_dev), __func__));
   13615 	return;
   13616 }
   13617 
   13618 static int
   13619 wm_get_eecd(struct wm_softc *sc)
   13620 {
   13621 	uint32_t reg;
   13622 	int x;
   13623 
   13624 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13625 		device_xname(sc->sc_dev), __func__));
   13626 
   13627 	reg = CSR_READ(sc, WMREG_EECD);
   13628 
   13629 	/* Request EEPROM access. */
   13630 	reg |= EECD_EE_REQ;
   13631 	CSR_WRITE(sc, WMREG_EECD, reg);
   13632 
	/* ...and wait for it to be granted. */
   13634 	for (x = 0; x < 1000; x++) {
   13635 		reg = CSR_READ(sc, WMREG_EECD);
   13636 		if (reg & EECD_EE_GNT)
   13637 			break;
   13638 		delay(5);
   13639 	}
   13640 	if ((reg & EECD_EE_GNT) == 0) {
   13641 		aprint_error_dev(sc->sc_dev,
   13642 		    "could not acquire EEPROM GNT\n");
   13643 		reg &= ~EECD_EE_REQ;
   13644 		CSR_WRITE(sc, WMREG_EECD, reg);
   13645 		return -1;
   13646 	}
   13647 
   13648 	return 0;
   13649 }
   13650 
   13651 static void
   13652 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13653 {
   13654 
   13655 	*eecd |= EECD_SK;
   13656 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13657 	CSR_WRITE_FLUSH(sc);
   13658 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13659 		delay(1);
   13660 	else
   13661 		delay(50);
   13662 }
   13663 
   13664 static void
   13665 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13666 {
   13667 
   13668 	*eecd &= ~EECD_SK;
   13669 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13670 	CSR_WRITE_FLUSH(sc);
   13671 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13672 		delay(1);
   13673 	else
   13674 		delay(50);
   13675 }
   13676 
   13677 static void
   13678 wm_put_eecd(struct wm_softc *sc)
   13679 {
   13680 	uint32_t reg;
   13681 
   13682 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13683 		device_xname(sc->sc_dev), __func__));
   13684 
   13685 	/* Stop nvm */
   13686 	reg = CSR_READ(sc, WMREG_EECD);
   13687 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13688 		/* Pull CS high */
   13689 		reg |= EECD_CS;
   13690 		wm_nvm_eec_clock_lower(sc, &reg);
   13691 	} else {
   13692 		/* CS on Microwire is active-high */
   13693 		reg &= ~(EECD_CS | EECD_DI);
   13694 		CSR_WRITE(sc, WMREG_EECD, reg);
   13695 		wm_nvm_eec_clock_raise(sc, &reg);
   13696 		wm_nvm_eec_clock_lower(sc, &reg);
   13697 	}
   13698 
   13699 	reg = CSR_READ(sc, WMREG_EECD);
   13700 	reg &= ~EECD_EE_REQ;
   13701 	CSR_WRITE(sc, WMREG_EECD, reg);
   13702 
   13703 	return;
   13704 }
   13705 
   13706 /*
   13707  * Get hardware semaphore.
   13708  * Same as e1000_get_hw_semaphore_generic()
   13709  */
   13710 static int
   13711 wm_get_swsm_semaphore(struct wm_softc *sc)
   13712 {
   13713 	int32_t timeout;
   13714 	uint32_t swsm;
   13715 
   13716 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13717 		device_xname(sc->sc_dev), __func__));
   13718 	KASSERT(sc->sc_nvm_wordsize > 0);
   13719 
   13720 retry:
   13721 	/* Get the SW semaphore. */
   13722 	timeout = sc->sc_nvm_wordsize + 1;
   13723 	while (timeout) {
   13724 		swsm = CSR_READ(sc, WMREG_SWSM);
   13725 
   13726 		if ((swsm & SWSM_SMBI) == 0)
   13727 			break;
   13728 
   13729 		delay(50);
   13730 		timeout--;
   13731 	}
   13732 
   13733 	if (timeout == 0) {
   13734 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13735 			/*
   13736 			 * In rare circumstances, the SW semaphore may already
   13737 			 * be held unintentionally. Clear the semaphore once
   13738 			 * before giving up.
   13739 			 */
   13740 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13741 			wm_put_swsm_semaphore(sc);
   13742 			goto retry;
   13743 		}
   13744 		aprint_error_dev(sc->sc_dev,
   13745 		    "could not acquire SWSM SMBI\n");
   13746 		return 1;
   13747 	}
   13748 
   13749 	/* Get the FW semaphore. */
   13750 	timeout = sc->sc_nvm_wordsize + 1;
   13751 	while (timeout) {
   13752 		swsm = CSR_READ(sc, WMREG_SWSM);
   13753 		swsm |= SWSM_SWESMBI;
   13754 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13755 		/* If we managed to set the bit we got the semaphore. */
   13756 		swsm = CSR_READ(sc, WMREG_SWSM);
   13757 		if (swsm & SWSM_SWESMBI)
   13758 			break;
   13759 
   13760 		delay(50);
   13761 		timeout--;
   13762 	}
   13763 
   13764 	if (timeout == 0) {
   13765 		aprint_error_dev(sc->sc_dev,
   13766 		    "could not acquire SWSM SWESMBI\n");
   13767 		/* Release semaphores */
   13768 		wm_put_swsm_semaphore(sc);
   13769 		return 1;
   13770 	}
   13771 	return 0;
   13772 }
   13773 
   13774 /*
   13775  * Put hardware semaphore.
   13776  * Same as e1000_put_hw_semaphore_generic()
   13777  */
   13778 static void
   13779 wm_put_swsm_semaphore(struct wm_softc *sc)
   13780 {
   13781 	uint32_t swsm;
   13782 
   13783 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13784 		device_xname(sc->sc_dev), __func__));
   13785 
   13786 	swsm = CSR_READ(sc, WMREG_SWSM);
   13787 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13788 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13789 }
   13790 
   13791 /*
   13792  * Get SW/FW semaphore.
   13793  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13794  */
   13795 static int
   13796 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13797 {
   13798 	uint32_t swfw_sync;
   13799 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13800 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13801 	int timeout;
   13802 
   13803 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13804 		device_xname(sc->sc_dev), __func__));
   13805 
   13806 	if (sc->sc_type == WM_T_80003)
   13807 		timeout = 50;
   13808 	else
   13809 		timeout = 200;
   13810 
   13811 	while (timeout) {
   13812 		if (wm_get_swsm_semaphore(sc)) {
   13813 			aprint_error_dev(sc->sc_dev,
   13814 			    "%s: failed to get semaphore\n",
   13815 			    __func__);
   13816 			return 1;
   13817 		}
   13818 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13819 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13820 			swfw_sync |= swmask;
   13821 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13822 			wm_put_swsm_semaphore(sc);
   13823 			return 0;
   13824 		}
   13825 		wm_put_swsm_semaphore(sc);
   13826 		delay(5000);
   13827 		timeout--;
   13828 	}
   13829 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13830 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13831 	return 1;
   13832 }
   13833 
   13834 static void
   13835 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13836 {
   13837 	uint32_t swfw_sync;
   13838 
   13839 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13840 		device_xname(sc->sc_dev), __func__));
   13841 
   13842 	while (wm_get_swsm_semaphore(sc) != 0)
   13843 		continue;
   13844 
   13845 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13846 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13847 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13848 
   13849 	wm_put_swsm_semaphore(sc);
   13850 }
   13851 
   13852 static int
   13853 wm_get_nvm_80003(struct wm_softc *sc)
   13854 {
   13855 	int rv;
   13856 
   13857 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13858 		device_xname(sc->sc_dev), __func__));
   13859 
   13860 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13861 		aprint_error_dev(sc->sc_dev,
   13862 		    "%s: failed to get semaphore(SWFW)\n",
   13863 		    __func__);
   13864 		return rv;
   13865 	}
   13866 
   13867 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13868 	    && (rv = wm_get_eecd(sc)) != 0) {
   13869 		aprint_error_dev(sc->sc_dev,
   13870 		    "%s: failed to get semaphore(EECD)\n",
   13871 		    __func__);
   13872 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13873 		return rv;
   13874 	}
   13875 
   13876 	return 0;
   13877 }
   13878 
   13879 static void
   13880 wm_put_nvm_80003(struct wm_softc *sc)
   13881 {
   13882 
   13883 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13884 		device_xname(sc->sc_dev), __func__));
   13885 
   13886 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13887 		wm_put_eecd(sc);
   13888 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13889 }
   13890 
   13891 static int
   13892 wm_get_nvm_82571(struct wm_softc *sc)
   13893 {
   13894 	int rv;
   13895 
   13896 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13897 		device_xname(sc->sc_dev), __func__));
   13898 
   13899 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13900 		return rv;
   13901 
   13902 	switch (sc->sc_type) {
   13903 	case WM_T_82573:
   13904 		break;
   13905 	default:
   13906 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13907 			rv = wm_get_eecd(sc);
   13908 		break;
   13909 	}
   13910 
   13911 	if (rv != 0) {
   13912 		aprint_error_dev(sc->sc_dev,
   13913 		    "%s: failed to get semaphore\n",
   13914 		    __func__);
   13915 		wm_put_swsm_semaphore(sc);
   13916 	}
   13917 
   13918 	return rv;
   13919 }
   13920 
   13921 static void
   13922 wm_put_nvm_82571(struct wm_softc *sc)
   13923 {
   13924 
   13925 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13926 		device_xname(sc->sc_dev), __func__));
   13927 
   13928 	switch (sc->sc_type) {
   13929 	case WM_T_82573:
   13930 		break;
   13931 	default:
   13932 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13933 			wm_put_eecd(sc);
   13934 		break;
   13935 	}
   13936 
   13937 	wm_put_swsm_semaphore(sc);
   13938 }
   13939 
   13940 static int
   13941 wm_get_phy_82575(struct wm_softc *sc)
   13942 {
   13943 
   13944 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13945 		device_xname(sc->sc_dev), __func__));
   13946 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13947 }
   13948 
   13949 static void
   13950 wm_put_phy_82575(struct wm_softc *sc)
   13951 {
   13952 
   13953 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13954 		device_xname(sc->sc_dev), __func__));
   13955 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13956 }
   13957 
   13958 static int
   13959 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13960 {
   13961 	uint32_t ext_ctrl;
   13962 	int timeout = 200;
   13963 
   13964 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13965 		device_xname(sc->sc_dev), __func__));
   13966 
   13967 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13968 	for (timeout = 0; timeout < 200; timeout++) {
   13969 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13970 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13971 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13972 
   13973 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13974 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13975 			return 0;
   13976 		delay(5000);
   13977 	}
   13978 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13979 	    device_xname(sc->sc_dev), ext_ctrl);
   13980 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13981 	return 1;
   13982 }
   13983 
   13984 static void
   13985 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13986 {
   13987 	uint32_t ext_ctrl;
   13988 
   13989 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13990 		device_xname(sc->sc_dev), __func__));
   13991 
   13992 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13993 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13994 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13995 
   13996 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13997 }
   13998 
   13999 static int
   14000 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14001 {
   14002 	uint32_t ext_ctrl;
   14003 	int timeout;
   14004 
   14005 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14006 		device_xname(sc->sc_dev), __func__));
   14007 	mutex_enter(sc->sc_ich_phymtx);
   14008 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14009 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14010 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14011 			break;
   14012 		delay(1000);
   14013 	}
   14014 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14015 		printf("%s: SW has already locked the resource\n",
   14016 		    device_xname(sc->sc_dev));
   14017 		goto out;
   14018 	}
   14019 
   14020 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14021 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14022 	for (timeout = 0; timeout < 1000; timeout++) {
   14023 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14024 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14025 			break;
   14026 		delay(1000);
   14027 	}
   14028 	if (timeout >= 1000) {
   14029 		printf("%s: failed to acquire semaphore\n",
   14030 		    device_xname(sc->sc_dev));
   14031 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14032 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14033 		goto out;
   14034 	}
   14035 	return 0;
   14036 
   14037 out:
   14038 	mutex_exit(sc->sc_ich_phymtx);
   14039 	return 1;
   14040 }
   14041 
   14042 static void
   14043 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14044 {
   14045 	uint32_t ext_ctrl;
   14046 
   14047 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14048 		device_xname(sc->sc_dev), __func__));
   14049 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14050 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14051 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14052 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14053 	} else {
   14054 		printf("%s: Semaphore unexpectedly released\n",
   14055 		    device_xname(sc->sc_dev));
   14056 	}
   14057 
   14058 	mutex_exit(sc->sc_ich_phymtx);
   14059 }
   14060 
   14061 static int
   14062 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14063 {
   14064 
   14065 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14066 		device_xname(sc->sc_dev), __func__));
   14067 	mutex_enter(sc->sc_ich_nvmmtx);
   14068 
   14069 	return 0;
   14070 }
   14071 
   14072 static void
   14073 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14074 {
   14075 
   14076 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14077 		device_xname(sc->sc_dev), __func__));
   14078 	mutex_exit(sc->sc_ich_nvmmtx);
   14079 }
   14080 
   14081 static int
   14082 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14083 {
   14084 	int i = 0;
   14085 	uint32_t reg;
   14086 
   14087 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14088 		device_xname(sc->sc_dev), __func__));
   14089 
   14090 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14091 	do {
   14092 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14093 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14094 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14095 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14096 			break;
   14097 		delay(2*1000);
   14098 		i++;
   14099 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14100 
   14101 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14102 		wm_put_hw_semaphore_82573(sc);
   14103 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14104 		    device_xname(sc->sc_dev));
   14105 		return -1;
   14106 	}
   14107 
   14108 	return 0;
   14109 }
   14110 
   14111 static void
   14112 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14113 {
   14114 	uint32_t reg;
   14115 
   14116 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14117 		device_xname(sc->sc_dev), __func__));
   14118 
   14119 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14120 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14121 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14122 }
   14123 
   14124 /*
   14125  * Management mode and power management related subroutines.
   14126  * BMC, AMT, suspend/resume and EEE.
   14127  */
   14128 
   14129 #ifdef WM_WOL
   14130 static int
   14131 wm_check_mng_mode(struct wm_softc *sc)
   14132 {
   14133 	int rv;
   14134 
   14135 	switch (sc->sc_type) {
   14136 	case WM_T_ICH8:
   14137 	case WM_T_ICH9:
   14138 	case WM_T_ICH10:
   14139 	case WM_T_PCH:
   14140 	case WM_T_PCH2:
   14141 	case WM_T_PCH_LPT:
   14142 	case WM_T_PCH_SPT:
   14143 	case WM_T_PCH_CNP:
   14144 		rv = wm_check_mng_mode_ich8lan(sc);
   14145 		break;
   14146 	case WM_T_82574:
   14147 	case WM_T_82583:
   14148 		rv = wm_check_mng_mode_82574(sc);
   14149 		break;
   14150 	case WM_T_82571:
   14151 	case WM_T_82572:
   14152 	case WM_T_82573:
   14153 	case WM_T_80003:
   14154 		rv = wm_check_mng_mode_generic(sc);
   14155 		break;
   14156 	default:
		/* nothing to do */
   14158 		rv = 0;
   14159 		break;
   14160 	}
   14161 
   14162 	return rv;
   14163 }
   14164 
   14165 static int
   14166 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14167 {
   14168 	uint32_t fwsm;
   14169 
   14170 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14171 
   14172 	if (((fwsm & FWSM_FW_VALID) != 0)
   14173 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14174 		return 1;
   14175 
   14176 	return 0;
   14177 }
   14178 
   14179 static int
   14180 wm_check_mng_mode_82574(struct wm_softc *sc)
   14181 {
   14182 	uint16_t data;
   14183 
   14184 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14185 
   14186 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14187 		return 1;
   14188 
   14189 	return 0;
   14190 }
   14191 
   14192 static int
   14193 wm_check_mng_mode_generic(struct wm_softc *sc)
   14194 {
   14195 	uint32_t fwsm;
   14196 
   14197 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14198 
   14199 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14200 		return 1;
   14201 
   14202 	return 0;
   14203 }
   14204 #endif /* WM_WOL */
   14205 
   14206 static int
   14207 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14208 {
   14209 	uint32_t manc, fwsm, factps;
   14210 
   14211 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14212 		return 0;
   14213 
   14214 	manc = CSR_READ(sc, WMREG_MANC);
   14215 
   14216 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14217 		device_xname(sc->sc_dev), manc));
   14218 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14219 		return 0;
   14220 
   14221 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14222 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14223 		factps = CSR_READ(sc, WMREG_FACTPS);
   14224 		if (((factps & FACTPS_MNGCG) == 0)
   14225 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14226 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14228 		uint16_t data;
   14229 
   14230 		factps = CSR_READ(sc, WMREG_FACTPS);
   14231 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14232 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14233 			device_xname(sc->sc_dev), factps, data));
   14234 		if (((factps & FACTPS_MNGCG) == 0)
   14235 		    && ((data & NVM_CFG2_MNGM_MASK)
   14236 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14237 			return 1;
   14238 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14239 	    && ((manc & MANC_ASF_EN) == 0))
   14240 		return 1;
   14241 
   14242 	return 0;
   14243 }
   14244 
   14245 static bool
   14246 wm_phy_resetisblocked(struct wm_softc *sc)
   14247 {
   14248 	bool blocked = false;
   14249 	uint32_t reg;
   14250 	int i = 0;
   14251 
   14252 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14253 		device_xname(sc->sc_dev), __func__));
   14254 
   14255 	switch (sc->sc_type) {
   14256 	case WM_T_ICH8:
   14257 	case WM_T_ICH9:
   14258 	case WM_T_ICH10:
   14259 	case WM_T_PCH:
   14260 	case WM_T_PCH2:
   14261 	case WM_T_PCH_LPT:
   14262 	case WM_T_PCH_SPT:
   14263 	case WM_T_PCH_CNP:
   14264 		do {
   14265 			reg = CSR_READ(sc, WMREG_FWSM);
   14266 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14267 				blocked = true;
   14268 				delay(10*1000);
   14269 				continue;
   14270 			}
   14271 			blocked = false;
   14272 		} while (blocked && (i++ < 30));
   14273 		return blocked;
   14274 		break;
   14275 	case WM_T_82571:
   14276 	case WM_T_82572:
   14277 	case WM_T_82573:
   14278 	case WM_T_82574:
   14279 	case WM_T_82583:
   14280 	case WM_T_80003:
   14281 		reg = CSR_READ(sc, WMREG_MANC);
   14282 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14283 			return true;
   14284 		else
   14285 			return false;
   14286 		break;
   14287 	default:
   14288 		/* no problem */
   14289 		break;
   14290 	}
   14291 
   14292 	return false;
   14293 }
   14294 
   14295 static void
   14296 wm_get_hw_control(struct wm_softc *sc)
   14297 {
   14298 	uint32_t reg;
   14299 
   14300 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14301 		device_xname(sc->sc_dev), __func__));
   14302 
   14303 	if (sc->sc_type == WM_T_82573) {
   14304 		reg = CSR_READ(sc, WMREG_SWSM);
   14305 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14306 	} else if (sc->sc_type >= WM_T_82571) {
   14307 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14308 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14309 	}
   14310 }
   14311 
   14312 static void
   14313 wm_release_hw_control(struct wm_softc *sc)
   14314 {
   14315 	uint32_t reg;
   14316 
   14317 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14318 		device_xname(sc->sc_dev), __func__));
   14319 
   14320 	if (sc->sc_type == WM_T_82573) {
   14321 		reg = CSR_READ(sc, WMREG_SWSM);
   14322 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14323 	} else if (sc->sc_type >= WM_T_82571) {
   14324 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14325 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14326 	}
   14327 }
   14328 
   14329 static void
   14330 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14331 {
   14332 	uint32_t reg;
   14333 
   14334 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14335 		device_xname(sc->sc_dev), __func__));
   14336 
   14337 	if (sc->sc_type < WM_T_PCH2)
   14338 		return;
   14339 
   14340 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14341 
   14342 	if (gate)
   14343 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14344 	else
   14345 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14346 
   14347 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14348 }
   14349 
   14350 static int
   14351 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14352 {
   14353 	uint32_t fwsm, reg;
   14354 	int rv = 0;
   14355 
   14356 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14357 		device_xname(sc->sc_dev), __func__));
   14358 
   14359 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14360 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14361 
   14362 	/* Disable ULP */
   14363 	wm_ulp_disable(sc);
   14364 
   14365 	/* Acquire PHY semaphore */
   14366 	rv = sc->phy.acquire(sc);
   14367 	if (rv != 0) {
   14368 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14369 		device_xname(sc->sc_dev), __func__));
   14370 		return -1;
   14371 	}
   14372 
   14373 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14374 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14375 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14376 	 */
   14377 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14378 	switch (sc->sc_type) {
   14379 	case WM_T_PCH_LPT:
   14380 	case WM_T_PCH_SPT:
   14381 	case WM_T_PCH_CNP:
   14382 		if (wm_phy_is_accessible_pchlan(sc))
   14383 			break;
   14384 
   14385 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14386 		 * forcing MAC to SMBus mode first.
   14387 		 */
   14388 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14389 		reg |= CTRL_EXT_FORCE_SMBUS;
   14390 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14391 #if 0
   14392 		/* XXX Isn't this required??? */
   14393 		CSR_WRITE_FLUSH(sc);
   14394 #endif
   14395 		/* Wait 50 milliseconds for MAC to finish any retries
   14396 		 * that it might be trying to perform from previous
   14397 		 * attempts to acknowledge any phy read requests.
   14398 		 */
   14399 		delay(50 * 1000);
   14400 		/* FALLTHROUGH */
   14401 	case WM_T_PCH2:
   14402 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14403 			break;
   14404 		/* FALLTHROUGH */
   14405 	case WM_T_PCH:
   14406 		if (sc->sc_type == WM_T_PCH)
   14407 			if ((fwsm & FWSM_FW_VALID) != 0)
   14408 				break;
   14409 
   14410 		if (wm_phy_resetisblocked(sc) == true) {
   14411 			printf("XXX reset is blocked(3)\n");
   14412 			break;
   14413 		}
   14414 
   14415 		/* Toggle LANPHYPC Value bit */
   14416 		wm_toggle_lanphypc_pch_lpt(sc);
   14417 
   14418 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14419 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14420 				break;
   14421 
   14422 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14423 			 * so ensure that the MAC is also out of SMBus mode
   14424 			 */
   14425 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14426 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14427 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14428 
   14429 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14430 				break;
   14431 			rv = -1;
   14432 		}
   14433 		break;
   14434 	default:
   14435 		break;
   14436 	}
   14437 
   14438 	/* Release semaphore */
   14439 	sc->phy.release(sc);
   14440 
   14441 	if (rv == 0) {
   14442 		/* Check to see if able to reset PHY.  Print error if not */
   14443 		if (wm_phy_resetisblocked(sc)) {
   14444 			printf("XXX reset is blocked(4)\n");
   14445 			goto out;
   14446 		}
   14447 
   14448 		/* Reset the PHY before any access to it.  Doing so, ensures
   14449 		 * that the PHY is in a known good state before we read/write
   14450 		 * PHY registers.  The generic reset is sufficient here,
   14451 		 * because we haven't determined the PHY type yet.
   14452 		 */
   14453 		if (wm_reset_phy(sc) != 0)
   14454 			goto out;
   14455 
   14456 		/* On a successful reset, possibly need to wait for the PHY
   14457 		 * to quiesce to an accessible state before returning control
   14458 		 * to the calling function.  If the PHY does not quiesce, then
   14459 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
   14461 		 */
   14462 		if (wm_phy_resetisblocked(sc))
   14463 			printf("XXX reset is blocked(4)\n");
   14464 	}
   14465 
   14466 out:
   14467 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14468 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14469 		delay(10*1000);
   14470 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14471 	}
   14472 
	return rv;
   14474 }
   14475 
   14476 static void
   14477 wm_init_manageability(struct wm_softc *sc)
   14478 {
   14479 
   14480 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14481 		device_xname(sc->sc_dev), __func__));
   14482 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14483 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14484 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14485 
   14486 		/* Disable hardware interception of ARP */
   14487 		manc &= ~MANC_ARP_EN;
   14488 
   14489 		/* Enable receiving management packets to the host */
   14490 		if (sc->sc_type >= WM_T_82571) {
   14491 			manc |= MANC_EN_MNG2HOST;
   14492 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14493 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14494 		}
   14495 
   14496 		CSR_WRITE(sc, WMREG_MANC, manc);
   14497 	}
   14498 }
   14499 
   14500 static void
   14501 wm_release_manageability(struct wm_softc *sc)
   14502 {
   14503 
   14504 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14505 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14506 
   14507 		manc |= MANC_ARP_EN;
   14508 		if (sc->sc_type >= WM_T_82571)
   14509 			manc &= ~MANC_EN_MNG2HOST;
   14510 
   14511 		CSR_WRITE(sc, WMREG_MANC, manc);
   14512 	}
   14513 }
   14514 
   14515 static void
   14516 wm_get_wakeup(struct wm_softc *sc)
   14517 {
   14518 
   14519 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14520 	switch (sc->sc_type) {
   14521 	case WM_T_82573:
   14522 	case WM_T_82583:
   14523 		sc->sc_flags |= WM_F_HAS_AMT;
   14524 		/* FALLTHROUGH */
   14525 	case WM_T_80003:
   14526 	case WM_T_82575:
   14527 	case WM_T_82576:
   14528 	case WM_T_82580:
   14529 	case WM_T_I350:
   14530 	case WM_T_I354:
   14531 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14532 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14533 		/* FALLTHROUGH */
   14534 	case WM_T_82541:
   14535 	case WM_T_82541_2:
   14536 	case WM_T_82547:
   14537 	case WM_T_82547_2:
   14538 	case WM_T_82571:
   14539 	case WM_T_82572:
   14540 	case WM_T_82574:
   14541 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14542 		break;
   14543 	case WM_T_ICH8:
   14544 	case WM_T_ICH9:
   14545 	case WM_T_ICH10:
   14546 	case WM_T_PCH:
   14547 	case WM_T_PCH2:
   14548 	case WM_T_PCH_LPT:
   14549 	case WM_T_PCH_SPT:
   14550 	case WM_T_PCH_CNP:
   14551 		sc->sc_flags |= WM_F_HAS_AMT;
   14552 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14553 		break;
   14554 	default:
   14555 		break;
   14556 	}
   14557 
   14558 	/* 1: HAS_MANAGE */
   14559 	if (wm_enable_mng_pass_thru(sc) != 0)
   14560 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14561 
   14562 	/*
	 * Note that the WOL flags are set after the EEPROM reset code has
	 * run.
   14565 	 */
   14566 }
   14567 
   14568 /*
   14569  * Unconfigure Ultra Low Power mode.
   14570  * Only for I217 and newer (see below).
   14571  */
   14572 static int
   14573 wm_ulp_disable(struct wm_softc *sc)
   14574 {
   14575 	uint32_t reg;
   14576 	uint16_t phyreg;
   14577 	int i = 0, rv = 0;
   14578 
   14579 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14580 		device_xname(sc->sc_dev), __func__));
   14581 	/* Exclude old devices */
   14582 	if ((sc->sc_type < WM_T_PCH_LPT)
   14583 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14584 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14585 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14586 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14587 		return 0;
   14588 
   14589 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14590 		/* Request ME un-configure ULP mode in the PHY */
   14591 		reg = CSR_READ(sc, WMREG_H2ME);
   14592 		reg &= ~H2ME_ULP;
   14593 		reg |= H2ME_ENFORCE_SETTINGS;
   14594 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14595 
   14596 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14597 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14598 			if (i++ == 30) {
   14599 				printf("%s timed out\n", __func__);
   14600 				return -1;
   14601 			}
   14602 			delay(10 * 1000);
   14603 		}
   14604 		reg = CSR_READ(sc, WMREG_H2ME);
   14605 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14606 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14607 
   14608 		return 0;
   14609 	}
   14610 
   14611 	/* Acquire semaphore */
   14612 	rv = sc->phy.acquire(sc);
   14613 	if (rv != 0) {
   14614 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14615 		device_xname(sc->sc_dev), __func__));
   14616 		return -1;
   14617 	}
   14618 
   14619 	/* Toggle LANPHYPC */
   14620 	wm_toggle_lanphypc_pch_lpt(sc);
   14621 
   14622 	/* Unforce SMBus mode in PHY */
   14623 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14624 	if (rv != 0) {
   14625 		uint32_t reg2;
   14626 
   14627 		printf("%s: Force SMBus first.\n", __func__);
   14628 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14629 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14630 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14631 		delay(50 * 1000);
   14632 
   14633 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14634 		    &phyreg);
   14635 		if (rv != 0)
   14636 			goto release;
   14637 	}
   14638 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14639 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14640 
   14641 	/* Unforce SMBus mode in MAC */
   14642 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14643 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14644 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14645 
   14646 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14647 	if (rv != 0)
   14648 		goto release;
   14649 	phyreg |= HV_PM_CTRL_K1_ENA;
   14650 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14651 
   14652 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14653 		&phyreg);
   14654 	if (rv != 0)
   14655 		goto release;
   14656 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14657 	    | I218_ULP_CONFIG1_STICKY_ULP
   14658 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14659 	    | I218_ULP_CONFIG1_WOL_HOST
   14660 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14661 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14662 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14663 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14664 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
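	/*
	 * Commit the cleared configuration first, then set the START
	 * bit with a second write to kick off the ULP exit in the PHY.
	 */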
   14665 	phyreg |= I218_ULP_CONFIG1_START;
   14666 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14667 
   14668 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14669 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14670 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14671 
   14672 release:
   14673 	/* Release semaphore */
   14674 	sc->phy.release(sc);
   14675 	wm_gmii_reset(sc);
   14676 	delay(50 * 1000);
   14677 
   14678 	return rv;
   14679 }
   14680 
   14681 /* WOL in the newer chipset interfaces (pchlan) */
   14682 static int
   14683 wm_enable_phy_wakeup(struct wm_softc *sc)
   14684 {
   14685 	device_t dev = sc->sc_dev;
   14686 	uint32_t mreg, moff;
   14687 	uint16_t wuce, wuc, wufc, preg;
   14688 	int i, rv;
   14689 
   14690 	KASSERT(sc->sc_type >= WM_T_PCH);
   14691 
   14692 	/* Copy MAC RARs to PHY RARs */
   14693 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14694 
   14695 	/* Activate PHY wakeup */
   14696 	rv = sc->phy.acquire(sc);
   14697 	if (rv != 0) {
   14698 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14699 		    __func__);
   14700 		return rv;
   14701 	}
   14702 
   14703 	/*
   14704 	 * Enable access to PHY wakeup registers.
   14705 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14706 	 */
   14707 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14708 	if (rv != 0) {
   14709 		device_printf(dev,
   14710 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14711 		goto release;
   14712 	}
   14713 
   14714 	/* Copy MAC MTA to PHY MTA */
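	/* (Each 32-bit MAC register is written as two 16-bit PHY words.) */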
   14715 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14716 		uint16_t lo, hi;
   14717 
   14718 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14719 		lo = (uint16_t)(mreg & 0xffff);
   14720 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14721 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14722 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14723 	}
   14724 
   14725 	/* Configure PHY Rx Control register */
   14726 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14727 	mreg = CSR_READ(sc, WMREG_RCTL);
   14728 	if (mreg & RCTL_UPE)
   14729 		preg |= BM_RCTL_UPE;
   14730 	if (mreg & RCTL_MPE)
   14731 		preg |= BM_RCTL_MPE;
   14732 	preg &= ~(BM_RCTL_MO_MASK);
   14733 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14734 	if (moff != 0)
   14735 		preg |= moff << BM_RCTL_MO_SHIFT;
   14736 	if (mreg & RCTL_BAM)
   14737 		preg |= BM_RCTL_BAM;
   14738 	if (mreg & RCTL_PMCF)
   14739 		preg |= BM_RCTL_PMCF;
   14740 	mreg = CSR_READ(sc, WMREG_CTRL);
   14741 	if (mreg & CTRL_RFCE)
   14742 		preg |= BM_RCTL_RFCE;
   14743 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14744 
   14745 	wuc = WUC_APME | WUC_PME_EN;
   14746 	wufc = WUFC_MAG;
   14747 	/* Enable PHY wakeup in MAC register */
   14748 	CSR_WRITE(sc, WMREG_WUC,
   14749 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14750 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14751 
   14752 	/* Configure and enable PHY wakeup in PHY registers */
   14753 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14754 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14755 
   14756 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14757 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14758 
   14759 release:
   14760 	sc->phy.release(sc);
   14761 
   14762 	return rv;
   14763 }
   14764 
   14765 /* Power down workaround on D3 */
   14766 static void
   14767 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14768 {
   14769 	uint32_t reg;
   14770 	uint16_t phyreg;
   14771 	int i;
   14772 
   14773 	for (i = 0; i < 2; i++) {
   14774 		/* Disable link */
   14775 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14776 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14777 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14778 
   14779 		/*
   14780 		 * Call gig speed drop workaround on Gig disable before
   14781 		 * accessing any PHY registers
   14782 		 */
   14783 		if (sc->sc_type == WM_T_ICH8)
   14784 			wm_gig_downshift_workaround_ich8lan(sc);
   14785 
   14786 		/* Write VR power-down enable */
   14787 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14788 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14789 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14790 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14791 
   14792 		/* Read it back and test */
   14793 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14794 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14795 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14796 			break;
   14797 
   14798 		/* Issue PHY reset and repeat at most one more time */
   14799 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14800 	}
   14801 }
   14802 
   14803 /*
   14804  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14805  *  @sc: pointer to the HW structure
   14806  *
   14807  *  During S0 to Sx transition, it is possible the link remains at gig
   14808  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14809  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14810  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14811  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14812  *  needs to be written.
   14813  *  Parts that support (and are linked to a partner which supports) EEE in
   14814  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14815  *  than 10Mbps w/o EEE.
   14816  */
   14817 static void
   14818 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14819 {
   14820 	device_t dev = sc->sc_dev;
   14821 	struct ethercom *ec = &sc->sc_ethercom;
   14822 	uint32_t phy_ctrl;
   14823 	int rv;
   14824 
   14825 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14826 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14827 
   14828 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14829 
   14830 	if (sc->sc_phytype == WMPHY_I217) {
   14831 		uint16_t devid = sc->sc_pcidevid;
   14832 
   14833 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14834 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14835 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14836 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14837 		    (sc->sc_type >= WM_T_PCH_SPT))
   14838 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14839 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14840 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14841 
   14842 		if (sc->phy.acquire(sc) != 0)
   14843 			goto out;
   14844 
   14845 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14846 			uint16_t eee_advert;
   14847 
   14848 			rv = wm_read_emi_reg_locked(dev,
   14849 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14850 			if (rv)
   14851 				goto release;
   14852 
   14853 			/*
   14854 			 * Disable LPLU if both link partners support 100BaseT
   14855 			 * EEE and 100Full is advertised on both ends of the
   14856 			 * link, and enable Auto Enable LPI since there will
   14857 			 * be no driver to enable LPI while in Sx.
   14858 			 */
   14859 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14860 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14861 				uint16_t anar, phy_reg;
   14862 
   14863 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14864 				    &anar);
   14865 				if (anar & ANAR_TX_FD) {
   14866 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14867 					    PHY_CTRL_NOND0A_LPLU);
   14868 
   14869 					/* Set Auto Enable LPI after link up */
   14870 					sc->phy.readreg_locked(dev, 2,
   14871 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14872 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14873 					sc->phy.writereg_locked(dev, 2,
   14874 					    I217_LPI_GPIO_CTRL, phy_reg);
   14875 				}
   14876 			}
   14877 		}
   14878 
   14879 		/*
   14880 		 * For i217 Intel Rapid Start Technology support,
   14881 		 * when the system is going into Sx and no manageability engine
   14882 		 * is present, the driver must configure proxy to reset only on
   14883 	 * power good.  LPI (Low Power Idle) state must also reset only
   14884 		 * on power good, as well as the MTA (Multicast table array).
   14885 		 * The SMBus release must also be disabled on LCD reset.
   14886 		 */
   14887 
   14888 		/*
   14889 		 * Enable MTA to reset for Intel Rapid Start Technology
   14890 		 * support (not yet implemented)
   14891 		 */
   14892 
   14893 release:
   14894 		sc->phy.release(sc);
   14895 	}
   14896 out:
   14897 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14898 
   14899 	if (sc->sc_type == WM_T_ICH8)
   14900 		wm_gig_downshift_workaround_ich8lan(sc);
   14901 
   14902 	if (sc->sc_type >= WM_T_PCH) {
   14903 		wm_oem_bits_config_ich8lan(sc, false);
   14904 
   14905 		/* Reset PHY to activate OEM bits on 82577/8 */
   14906 		if (sc->sc_type == WM_T_PCH)
   14907 			wm_reset_phy(sc);
   14908 
   14909 		if (sc->phy.acquire(sc) != 0)
   14910 			return;
   14911 		wm_write_smbus_addr(sc);
   14912 		sc->phy.release(sc);
   14913 	}
   14914 }
   14915 
   14916 /*
   14917  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14918  *  @sc: pointer to the HW structure
   14919  *
   14920  *  During Sx to S0 transitions on non-managed devices or managed devices
   14921  *  on which PHY resets are not blocked, if the PHY registers cannot be
   14922  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   14923  *  the PHY.
   14924  *  On i217, setup Intel Rapid Start Technology.
   14925  */
   14926 static int
   14927 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14928 {
   14929 	device_t dev = sc->sc_dev;
   14930 	int rv;
   14931 
   14932 	if (sc->sc_type < WM_T_PCH2)
   14933 		return 0;
   14934 
   14935 	rv = wm_init_phy_workarounds_pchlan(sc);
   14936 	if (rv != 0)
   14937 		return -1;
   14938 
   14939 	/* For i217 Intel Rapid Start Technology support: when the system
   14940 	 * is transitioning from Sx and no manageability engine is present,
   14941 	 * configure SMBus to restore on reset, disable proxy, and enable
   14942 	 * the reset on MTA (Multicast Table Array).
   14943 	 */
   14944 	if (sc->sc_phytype == WMPHY_I217) {
   14945 		uint16_t phy_reg;
   14946 
   14947 		if (sc->phy.acquire(sc) != 0)
   14948 			return -1;
   14949 
   14950 		/* Clear Auto Enable LPI after link up */
   14951 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14952 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14953 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14954 
   14955 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14956 			/* Restore clear on SMB if no manageability engine
   14957 			 * is present
   14958 			 */
   14959 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   14960 			    &phy_reg);
   14961 			if (rv != 0)
   14962 				goto release;
   14963 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14964 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14965 
   14966 			/* Disable Proxy */
   14967 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14968 		}
   14969 		/* Enable reset on MTA */
   14970 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14971 		if (rv != 0)
   14972 			goto release;
   14973 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14974 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14975 
   14976 release:
   14977 		sc->phy.release(sc);
   14978 		return rv;
   14979 	}
   14980 
   14981 	return 0;
   14982 }
   14983 
   14984 static void
   14985 wm_enable_wakeup(struct wm_softc *sc)
   14986 {
   14987 	uint32_t reg, pmreg;
   14988 	pcireg_t pmode;
   14989 	int rv = 0;
   14990 
   14991 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14992 		device_xname(sc->sc_dev), __func__));
   14993 
   14994 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14995 	    &pmreg, NULL) == 0)
   14996 		return;
   14997 
   14998 	if ((sc->sc_flags & WM_F_WOL) == 0)
   14999 		goto pme;
   15000 
   15001 	/* Advertise the wakeup capability */
   15002 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15003 	    | CTRL_SWDPIN(3));
   15004 
   15005 	/* Keep the laser running on fiber adapters */
   15006 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15007 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15008 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15009 		reg |= CTRL_EXT_SWDPIN(3);
   15010 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15011 	}
   15012 
   15013 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15014 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15015 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15016 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15017 		wm_suspend_workarounds_ich8lan(sc);
   15018 
   15019 #if 0	/* For multicast packets (incomplete: WUFC is never written back) */
   15020 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15021 	reg |= WUFC_MC;
   15022 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15023 #endif
   15024 
   15025 	if (sc->sc_type >= WM_T_PCH) {
   15026 		rv = wm_enable_phy_wakeup(sc);
   15027 		if (rv != 0)
   15028 			goto pme;
   15029 	} else {
   15030 		/* Enable wakeup by the MAC */
   15031 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15032 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15033 	}
   15034 
   15035 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15036 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15037 		|| (sc->sc_type == WM_T_PCH2))
   15038 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15039 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15040 
   15041 pme:
   15042 	/* Request PME */
   15043 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15044 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15045 		/* For WOL */
   15046 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15047 	} else {
   15048 		/* Disable WOL */
   15049 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15050 	}
   15051 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15052 }
   15053 
   15054 /* Disable ASPM L0s and/or L1 to work around errata */
   15055 static void
   15056 wm_disable_aspm(struct wm_softc *sc)
   15057 {
   15058 	pcireg_t reg, mask = 0;
   15059 	unsigned const char *str = "";
   15060 
   15061 	/*
   15062 	 * Only for PCIe devices that have the PCIe capability structure in
   15063 	 * their PCI config space.
   15064 	 */
   15065 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15066 		return;
   15067 
   15068 	switch (sc->sc_type) {
   15069 	case WM_T_82571:
   15070 	case WM_T_82572:
   15071 		/*
   15072 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15073 		 * State Power management L1 State (ASPM L1).
   15074 		 */
   15075 		mask = PCIE_LCSR_ASPM_L1;
   15076 		str = "L1 is";
   15077 		break;
   15078 	case WM_T_82573:
   15079 	case WM_T_82574:
   15080 	case WM_T_82583:
   15081 		/*
   15082 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15083 		 *
   15084 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
   15085 		 * some chipsets.  The 82574 and 82583 documents say that
   15086 		 * disabling L0s on those specific chipsets is sufficient,
   15087 		 * but we follow what the Intel em driver does.
   15088 		 *
   15089 		 * References:
   15090 		 * Errata 8 of the Specification Update of i82573.
   15091 		 * Errata 20 of the Specification Update of i82574.
   15092 		 * Errata 9 of the Specification Update of i82583.
   15093 		 */
   15094 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15095 		str = "L0s and L1 are";
   15096 		break;
   15097 	default:
   15098 		return;
   15099 	}
   15100 
   15101 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15102 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15103 	reg &= ~mask;
   15104 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15105 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15106 
   15107 	/* Print only in wm_attach() */
   15108 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15109 		aprint_verbose_dev(sc->sc_dev,
   15110 		    "ASPM %s disabled to workaround the errata.\n", str);
   15111 }
   15112 
   15113 /* LPLU */
   15114 
   15115 static void
   15116 wm_lplu_d0_disable(struct wm_softc *sc)
   15117 {
   15118 	struct mii_data *mii = &sc->sc_mii;
   15119 	uint32_t reg;
   15120 	uint16_t phyval;
   15121 
   15122 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15123 		device_xname(sc->sc_dev), __func__));
   15124 
   15125 	if (sc->sc_phytype == WMPHY_IFE)
   15126 		return;
   15127 
   15128 	switch (sc->sc_type) {
   15129 	case WM_T_82571:
   15130 	case WM_T_82572:
   15131 	case WM_T_82573:
   15132 	case WM_T_82575:
   15133 	case WM_T_82576:
   15134 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15135 		phyval &= ~PMR_D0_LPLU;
   15136 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15137 		break;
   15138 	case WM_T_82580:
   15139 	case WM_T_I350:
   15140 	case WM_T_I210:
   15141 	case WM_T_I211:
   15142 		reg = CSR_READ(sc, WMREG_PHPM);
   15143 		reg &= ~PHPM_D0A_LPLU;
   15144 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15145 		break;
   15146 	case WM_T_82574:
   15147 	case WM_T_82583:
   15148 	case WM_T_ICH8:
   15149 	case WM_T_ICH9:
   15150 	case WM_T_ICH10:
   15151 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15152 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15153 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15154 		CSR_WRITE_FLUSH(sc);
   15155 		break;
   15156 	case WM_T_PCH:
   15157 	case WM_T_PCH2:
   15158 	case WM_T_PCH_LPT:
   15159 	case WM_T_PCH_SPT:
   15160 	case WM_T_PCH_CNP:
   15161 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15162 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15163 		if (wm_phy_resetisblocked(sc) == false)
   15164 			phyval |= HV_OEM_BITS_ANEGNOW;
   15165 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15166 		break;
   15167 	default:
   15168 		break;
   15169 	}
   15170 }
   15171 
   15172 /* EEE */
   15173 
   15174 static int
   15175 wm_set_eee_i350(struct wm_softc *sc)
   15176 {
   15177 	struct ethercom *ec = &sc->sc_ethercom;
   15178 	uint32_t ipcnfg, eeer;
   15179 	uint32_t ipcnfg_mask
   15180 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15181 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
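	/*
	 * IPCNFG selects the speeds at which EEE may be negotiated;
	 * EEER enables LPI entry on Tx/Rx and LPI flow control.  Both
	 * are toggled together below from the single ETHERCAP_EEE
	 * setting.
	 */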
   15182 
   15183 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15184 	eeer = CSR_READ(sc, WMREG_EEER);
   15185 
   15186 	/* enable or disable per user setting */
   15187 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15188 		ipcnfg |= ipcnfg_mask;
   15189 		eeer |= eeer_mask;
   15190 	} else {
   15191 		ipcnfg &= ~ipcnfg_mask;
   15192 		eeer &= ~eeer_mask;
   15193 	}
   15194 
   15195 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15196 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15197 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15198 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15199 
   15200 	return 0;
   15201 }
   15202 
   15203 static int
   15204 wm_set_eee_pchlan(struct wm_softc *sc)
   15205 {
   15206 	device_t dev = sc->sc_dev;
   15207 	struct ethercom *ec = &sc->sc_ethercom;
   15208 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15209 	int rv = 0;
   15210 
   15211 	switch (sc->sc_phytype) {
   15212 	case WMPHY_82579:
   15213 		lpa = I82579_EEE_LP_ABILITY;
   15214 		pcs_status = I82579_EEE_PCS_STATUS;
   15215 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15216 		break;
   15217 	case WMPHY_I217:
   15218 		lpa = I217_EEE_LP_ABILITY;
   15219 		pcs_status = I217_EEE_PCS_STATUS;
   15220 		adv_addr = I217_EEE_ADVERTISEMENT;
   15221 		break;
   15222 	default:
   15223 		return 0;
   15224 	}
   15225 
   15226 	if (sc->phy.acquire(sc)) {
   15227 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15228 		return 0;
   15229 	}
   15230 
   15231 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15232 	if (rv != 0)
   15233 		goto release;
   15234 
   15235 	/* Clear bits that enable EEE in various speeds */
   15236 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15237 
   15238 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15239 		/* Save off link partner's EEE ability */
   15240 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15241 		if (rv != 0)
   15242 			goto release;
   15243 
   15244 		/* Read EEE advertisement */
   15245 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15246 			goto release;
   15247 
   15248 		/*
   15249 		 * Enable EEE only for speeds in which the link partner is
   15250 		 * EEE capable and for which we advertise EEE.
   15251 		 */
   15252 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15253 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15254 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15255 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15256 			if ((data & ANLPAR_TX_FD) != 0)
   15257 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15258 			else {
   15259 				/*
   15260 				 * EEE is not supported in 100Half, so ignore
   15261 				 * partner's EEE in 100 ability if full-duplex
   15262 				 * is not advertised.
   15263 				 */
   15264 				sc->eee_lp_ability
   15265 				    &= ~AN_EEEADVERT_100_TX;
   15266 			}
   15267 		}
   15268 	}
   15269 
   15270 	if (sc->sc_phytype == WMPHY_82579) {
   15271 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15272 		if (rv != 0)
   15273 			goto release;
   15274 
   15275 		data &= ~I82579_LPI_PLL_SHUT_100;
   15276 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15277 	}
   15278 
   15279 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15280 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15281 		goto release;
   15282 
   15283 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15284 release:
   15285 	sc->phy.release(sc);
   15286 
   15287 	return rv;
   15288 }
   15289 
   15290 static int
   15291 wm_set_eee(struct wm_softc *sc)
   15292 {
   15293 	struct ethercom *ec = &sc->sc_ethercom;
   15294 
   15295 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15296 		return 0;
   15297 
   15298 	if (sc->sc_type == WM_T_I354) {
   15299 		/* I354 uses an external PHY */
   15300 		return 0; /* not yet */
   15301 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15302 		return wm_set_eee_i350(sc);
   15303 	else if (sc->sc_type >= WM_T_PCH2)
   15304 		return wm_set_eee_pchlan(sc);
   15305 
   15306 	return 0;
   15307 }
   15308 
   15309 /*
   15310  * Workarounds (mainly PHY related).
   15311  * Basically, PHY's workarounds are in the PHY drivers.
   15312  */
   15313 
   15314 /* Work-around for 82566 Kumeran PCS lock loss */
   15315 static int
   15316 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15317 {
   15318 	struct mii_data *mii = &sc->sc_mii;
   15319 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15320 	int i, reg, rv;
   15321 	uint16_t phyreg;
   15322 
   15323 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15324 		device_xname(sc->sc_dev), __func__));
   15325 
   15326 	/* If the link is not up, do nothing */
   15327 	if ((status & STATUS_LU) == 0)
   15328 		return 0;
   15329 
   15330 	/* Nothing to do if the link is other than 1Gbps */
   15331 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15332 		return 0;
   15333 
   15334 	for (i = 0; i < 10; i++) {
   15335 		/* read twice */
   15336 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15337 		if (rv != 0)
   15338 			return rv;
   15339 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15340 		if (rv != 0)
   15341 			return rv;
   15342 
   15343 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15344 			goto out;	/* GOOD! */
   15345 
   15346 		/* Reset the PHY */
   15347 		wm_reset_phy(sc);
   15348 		delay(5*1000);
   15349 	}
   15350 
   15351 	/* Disable GigE link negotiation */
   15352 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15353 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15354 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15355 
   15356 	/*
   15357 	 * Call gig speed drop workaround on Gig disable before accessing
   15358 	 * any PHY registers.
   15359 	 */
   15360 	wm_gig_downshift_workaround_ich8lan(sc);
   15361 
   15362 out:
   15363 	return 0;
   15364 }
   15365 
   15366 /*
   15367  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15368  *  @sc: pointer to the HW structure
   15369  *
   15370  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15371  *  LPLU, Gig disable, MDIC PHY reset):
   15372  *    1) Set Kumeran Near-end loopback
   15373  *    2) Clear Kumeran Near-end loopback
   15374  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15375  */
   15376 static void
   15377 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15378 {
   15379 	uint16_t kmreg;
   15380 
   15381 	/* Only for igp3 */
   15382 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15383 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15384 			return;
   15385 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15386 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15387 			return;
   15388 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15389 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15390 	}
   15391 }
   15392 
   15393 /*
   15394  * Workaround for pch's PHYs
   15395  * XXX should be moved to new PHY driver?
   15396  */
   15397 static int
   15398 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15399 {
   15400 	device_t dev = sc->sc_dev;
   15401 	struct mii_data *mii = &sc->sc_mii;
   15402 	struct mii_softc *child;
   15403 	uint16_t phy_data, phyrev = 0;
   15404 	int phytype = sc->sc_phytype;
   15405 	int rv;
   15406 
   15407 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15408 		device_xname(dev), __func__));
   15409 	KASSERT(sc->sc_type == WM_T_PCH);
   15410 
   15411 	/* Set MDIO slow mode before any other MDIO access */
   15412 	if (phytype == WMPHY_82577)
   15413 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15414 			return rv;
   15415 
   15416 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15417 	if (child != NULL)
   15418 		phyrev = child->mii_mpd_rev;
   15419 
   15420 	/* (82577 && (PHY rev 1 or 2)) || (82578 && PHY rev 1) */
   15421 	if ((child != NULL) &&
   15422 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15423 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15424 		/* Disable generation of early preamble (0x4431) */
   15425 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15426 		    &phy_data);
   15427 		if (rv != 0)
   15428 			return rv;
   15429 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15430 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15431 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15432 		    phy_data);
   15433 		if (rv != 0)
   15434 			return rv;
   15435 
   15436 		/* Preamble tuning for SSC */
   15437 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15438 		if (rv != 0)
   15439 			return rv;
   15440 	}
   15441 
   15442 	/* 82578 */
   15443 	if (phytype == WMPHY_82578) {
   15444 		/*
   15445 		 * Return registers to default by doing a soft reset then
   15446 		 * writing 0x3140 to the control register
   15447 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15448 		 */
   15449 		if ((child != NULL) && (phyrev < 2)) {
   15450 			PHY_RESET(child);
   15451 			rv = sc->sc_mii.mii_writereg(dev, 2, MII_BMCR,
   15452 			    0x3140);
   15453 			if (rv != 0)
   15454 				return rv;
   15455 		}
   15456 	}
   15457 
   15458 	/* Select page 0 */
   15459 	if ((rv = sc->phy.acquire(sc)) != 0)
   15460 		return rv;
   15461 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15462 	sc->phy.release(sc);
   15463 	if (rv != 0)
   15464 		return rv;
   15465 
   15466 	/*
   15467 	 * Configure the K1 Si workaround during PHY reset, assuming there
   15468 	 * is link, so that K1 is disabled if the link is at 1Gbps.
   15469 	 */
   15470 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15471 		return rv;
   15472 
   15473 	/* Workaround for link disconnects on a busy hub in half duplex */
   15474 	rv = sc->phy.acquire(sc);
   15475 	if (rv)
   15476 		return rv;
   15477 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15478 	if (rv)
   15479 		goto release;
   15480 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15481 	    phy_data & 0x00ff);
   15482 	if (rv)
   15483 		goto release;
   15484 
   15485 	/* Set MSE higher so that the link stays up when noise is high */
   15486 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15487 release:
   15488 	sc->phy.release(sc);
   15489 
   15490 	return rv;
   15491 }
   15494 
   15495 /*
   15496  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15497  *  @sc:   pointer to the HW structure
   15498  */
   15499 static void
   15500 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15501 {
   15502 	device_t dev = sc->sc_dev;
   15503 	uint32_t mac_reg;
   15504 	uint16_t i, wuce;
   15505 	int count;
   15506 
   15507 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15508 		device_xname(sc->sc_dev), __func__));
   15509 
   15510 	if (sc->phy.acquire(sc) != 0)
   15511 		return;
   15512 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15513 		goto release;
   15514 
   15515 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15516 	count = wm_rar_count(sc);
   15517 	for (i = 0; i < count; i++) {
   15518 		uint16_t lo, hi;
   15519 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15520 		lo = (uint16_t)(mac_reg & 0xffff);
   15521 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15522 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15523 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15524 
   15525 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15526 		lo = (uint16_t)(mac_reg & 0xffff);
   15527 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15528 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15529 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15530 	}
   15531 
   15532 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15533 
   15534 release:
   15535 	sc->phy.release(sc);
   15536 }
   15537 
   15538 /*
   15539  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15540  *  done after every PHY reset.
   15541  */
   15542 static int
   15543 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15544 {
   15545 	device_t dev = sc->sc_dev;
   15546 	int rv;
   15547 
   15548 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15549 		device_xname(dev), __func__));
   15550 	KASSERT(sc->sc_type == WM_T_PCH2);
   15551 
   15552 	/* Set MDIO slow mode before any other MDIO access */
   15553 	rv = wm_set_mdio_slow_mode_hv(sc);
   15554 	if (rv != 0)
   15555 		return rv;
   15556 
   15557 	rv = sc->phy.acquire(sc);
   15558 	if (rv != 0)
   15559 		return rv;
   15560 	/* Set MSE higher so that the link stays up when noise is high */
   15561 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15562 	if (rv != 0)
   15563 		goto release;
   15564 	/* Drop the link after the MSE threshold has been reached 5 times */
   15565 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15566 release:
   15567 	sc->phy.release(sc);
   15568 
   15569 	return rv;
   15570 }
   15571 
   15572 /**
   15573  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15574  *  @link: link up bool flag
   15575  *
   15576  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15577  *  preventing further DMA write requests.  Workaround the issue by disabling
   15578  *  the de-assertion of the clock request when in 1Gbps mode.
   15579  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15580  *  speeds in order to avoid Tx hangs.
   15581  **/
   15582 static int
   15583 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15584 {
   15585 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15586 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15587 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15588 	uint16_t phyreg;
   15589 
   15590 	if (link && (speed == STATUS_SPEED_1000)) {
   15591 		int rv = sc->phy.acquire(sc);
   15592 		if (rv != 0)
   15593 			return rv;
   15594 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15595 		    &phyreg);
   15594 		if (rv != 0)
   15595 			goto release;
   15596 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15597 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15598 		if (rv != 0)
   15599 			goto release;
   15600 		delay(20);
   15601 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15602 
   15603 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15604 		    &phyreg);
   15605 release:
   15606 		sc->phy.release(sc);
   15607 		return rv;
   15608 	}
   15609 
   15610 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15611 
   15612 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15613 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15614 	    || !link
   15615 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15616 		goto update_fextnvm6;
   15617 
   15618 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15619 
   15620 	/* Clear link status transmit timeout */
   15621 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15622 	if (speed == STATUS_SPEED_100) {
   15623 		/* Set inband Tx timeout to 5x10us for 100Half */
   15624 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15625 
   15626 		/* Do not extend the K1 entry latency for 100Half */
   15627 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15628 	} else {
   15629 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15630 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15631 
   15632 		/* Extend the K1 entry latency for 10 Mbps */
   15633 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15634 	}
   15635 
   15636 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15637 
   15638 update_fextnvm6:
   15639 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15640 	return 0;
   15641 }
   15642 
   15643 /*
   15644  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15645  *  @sc:   pointer to the HW structure
   15646  *  @link: link up bool flag
   15647  *
   15648  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   15649  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15650  *  If link is down, the function will restore the default K1 setting located
   15651  *  in the NVM.
   15652  */
   15653 static int
   15654 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15655 {
   15656 	int k1_enable = sc->sc_nvm_k1_enabled;
   15657 
   15658 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15659 		device_xname(sc->sc_dev), __func__));
   15660 
   15661 	if (sc->phy.acquire(sc) != 0)
   15662 		return -1;
   15663 
   15664 	if (link) {
   15665 		k1_enable = 0;
   15666 
   15667 		/* Link stall fix for link up */
   15668 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15669 		    0x0100);
   15670 	} else {
   15671 		/* Link stall fix for link down */
   15672 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15673 		    0x4100);
   15674 	}
   15675 
   15676 	wm_configure_k1_ich8lan(sc, k1_enable);
   15677 	sc->phy.release(sc);
   15678 
   15679 	return 0;
   15680 }
   15681 
   15682 /*
   15683  *  wm_k1_workaround_lv - K1 Si workaround
   15684  *  @sc:   pointer to the HW structure
   15685  *
   15686  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   15687  *  Disable K1 for 1000 and 100 speeds
   15688  */
   15689 static int
   15690 wm_k1_workaround_lv(struct wm_softc *sc)
   15691 {
   15692 	uint32_t reg;
   15693 	uint16_t phyreg;
   15694 	int rv;
   15695 
   15696 	if (sc->sc_type != WM_T_PCH2)
   15697 		return 0;
   15698 
   15699 	/* Set K1 beacon duration based on 10Mbps speed */
   15700 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15701 	if (rv != 0)
   15702 		return rv;
   15703 
   15704 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15705 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15706 		if (phyreg &
   15707 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   15708 			/* LV 1G/100 packet drop issue workaround */
   15709 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15710 			    &phyreg);
   15711 			if (rv != 0)
   15712 				return rv;
   15713 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15714 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15715 			    phyreg);
   15716 			if (rv != 0)
   15717 				return rv;
   15718 		} else {
   15719 			/* For 10Mbps */
   15720 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15721 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15722 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15723 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15724 		}
   15725 	}
   15726 
   15727 	return 0;
   15728 }
   15729 
   15730 /*
   15731  *  wm_link_stall_workaround_hv - Si workaround
   15732  *  @sc: pointer to the HW structure
   15733  *
   15734  *  This function works around a Si bug where the link partner can get
   15735  *  a link up indication before the PHY does. If small packets are sent
   15736  *  by the link partner they can be placed in the packet buffer without
   15737  *  being properly accounted for by the PHY and will stall preventing
   15738  *  further packets from being received.  The workaround is to clear the
   15739  *  packet buffer after the PHY detects link up.
   15740  */
   15741 static int
   15742 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15743 {
   15744 	uint16_t phyreg;
   15745 
   15746 	if (sc->sc_phytype != WMPHY_82578)
   15747 		return 0;
   15748 
   15749 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15750 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15751 	if ((phyreg & BMCR_LOOP) != 0)
   15752 		return 0;
   15753 
   15754 	/* Check if the link is up and at 1Gbps */
   15755 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15756 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15757 	    | BM_CS_STATUS_SPEED_MASK;
   15758 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15759 		| BM_CS_STATUS_SPEED_1000))
   15760 		return 0;
   15761 
   15762 	delay(200 * 1000);	/* XXX too big */
   15763 
   15764 	/* flush the packets in the fifo buffer */
   15765 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15766 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15767 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15768 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15769 
   15770 	return 0;
   15771 }
   15772 
   15773 static int
   15774 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15775 {
   15776 	int rv;
   15777 	uint16_t reg;
   15778 
   15779 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15780 	if (rv != 0)
   15781 		return rv;
   15782 
   15783 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15784 	    reg | HV_KMRN_MDIO_SLOW);
   15785 }
   15786 
   15787 /*
   15788  *  wm_configure_k1_ich8lan - Configure K1 power state
   15789  *  @sc: pointer to the HW structure
   15790  *  @enable: K1 state to configure
   15791  *
   15792  *  Configure the K1 power state based on the provided parameter.
   15793  *  Assumes semaphore already acquired.
   15794  */
   15795 static void
   15796 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15797 {
   15798 	uint32_t ctrl, ctrl_ext, tmp;
   15799 	uint16_t kmreg;
   15800 	int rv;
   15801 
   15802 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15803 
   15804 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15805 	if (rv != 0)
   15806 		return;
   15807 
   15808 	if (k1_enable)
   15809 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15810 	else
   15811 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15812 
   15813 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15814 	if (rv != 0)
   15815 		return;
   15816 
   15817 	delay(20);
   15818 
   15819 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15820 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15821 
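	/*
	 * Briefly force the MAC speed configuration (CTRL_FRCSPD plus
	 * CTRL_EXT_SPD_BYPS) so that the new K1 setting is latched by
	 * the hardware, then restore the original CTRL/CTRL_EXT values.
	 */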
   15822 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15823 	tmp |= CTRL_FRCSPD;
   15824 
   15825 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15826 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15827 	CSR_WRITE_FLUSH(sc);
   15828 	delay(20);
   15829 
   15830 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15831 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15832 	CSR_WRITE_FLUSH(sc);
   15833 	delay(20);
   15836 }
   15837 
   15838 /* special case - for 82575 - need to do manual init ... */
   15839 static void
   15840 wm_reset_init_script_82575(struct wm_softc *sc)
   15841 {
   15842 	/*
   15843 	 * Remark: this is untested code - we have no board without an EEPROM.
   15844 	 * The setup is the same as in the FreeBSD driver for the i82575.
   15845 	 */
   15846 
   15847 	/* SerDes configuration via SERDESCTRL */
   15848 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15849 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15850 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15851 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15852 
   15853 	/* CCM configuration via CCMCTL register */
   15854 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15855 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15856 
   15857 	/* PCIe lanes configuration */
   15858 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15859 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15860 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15861 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15862 
   15863 	/* PCIe PLL Configuration */
   15864 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15865 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15866 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15867 }
   15868 
   15869 static void
   15870 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15871 {
   15872 	uint32_t reg;
   15873 	uint16_t nvmword;
   15874 	int rv;
   15875 
   15876 	if (sc->sc_type != WM_T_82580)
   15877 		return;
   15878 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15879 		return;
   15880 
   15881 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15882 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15883 	if (rv != 0) {
   15884 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15885 		    __func__);
   15886 		return;
   15887 	}
   15888 
   15889 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15890 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15891 		reg |= MDICNFG_DEST;
   15892 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15893 		reg |= MDICNFG_COM_MDIO;
   15894 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15895 }
   15896 
   15897 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15898 
   15899 static bool
   15900 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15901 {
   15902 	uint32_t reg;
   15903 	uint16_t id1, id2;
   15904 	int i, rv;
   15905 
   15906 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15907 		device_xname(sc->sc_dev), __func__));
   15908 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15909 
   15910 	id1 = id2 = 0xffff;
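	/*
	 * Try the ID registers up to twice; the first MDIO access after
	 * a mode or power change may fail, so one retry is done before
	 * falling back to MDIO slow mode below.
	 */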
   15911 	for (i = 0; i < 2; i++) {
   15912 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15913 		    &id1);
   15914 		if ((rv != 0) || MII_INVALIDID(id1))
   15915 			continue;
   15916 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15917 		    &id2);
   15918 		if ((rv != 0) || MII_INVALIDID(id2))
   15919 			continue;
   15920 		break;
   15921 	}
   15922 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15923 		goto out;
   15924 
   15925 	/*
   15926 	 * In case the PHY needs to be in mdio slow mode,
   15927 	 * set slow mode and try to get the PHY id again.
   15928 	 */
   15929 	rv = 0;
   15930 	if (sc->sc_type < WM_T_PCH_LPT) {
   15931 		sc->phy.release(sc);
   15932 		wm_set_mdio_slow_mode_hv(sc);
   15933 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15934 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15935 		sc->phy.acquire(sc);
   15936 	}
   15937 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15938 		printf("XXX return with false\n");
   15939 		return false;
   15940 	}
   15941 out:
   15942 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15943 		/* Only unforce SMBus if ME is not active */
   15944 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15945 			uint16_t phyreg;
   15946 
   15947 			/* Unforce SMBus mode in PHY */
   15948 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15949 			    CV_SMB_CTRL, &phyreg);
   15950 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15951 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15952 			    CV_SMB_CTRL, phyreg);
   15953 
   15954 			/* Unforce SMBus mode in MAC */
   15955 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15956 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15957 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15958 		}
   15959 	}
   15960 	return true;
   15961 }
   15962 
   15963 static void
   15964 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15965 {
   15966 	uint32_t reg;
   15967 	int i;
   15968 
   15969 	/* Set PHY Config Counter to 50msec */
   15970 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15971 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15972 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15973 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15974 
   15975 	/* Toggle LANPHYPC */
   15976 	reg = CSR_READ(sc, WMREG_CTRL);
   15977 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15978 	reg &= ~CTRL_LANPHYPC_VALUE;
   15979 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15980 	CSR_WRITE_FLUSH(sc);
   15981 	delay(1000);
   15982 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15983 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15984 	CSR_WRITE_FLUSH(sc);
   15985 
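	/*
	 * Older parts simply wait 50 ms for the PHY to come back up;
	 * LPT and newer poll CTRL_EXT_LPCD (up to 20 x 5 ms) and then
	 * allow an extra 30 ms of settling time.
	 */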
   15986 	if (sc->sc_type < WM_T_PCH_LPT)
   15987 		delay(50 * 1000);
   15988 	else {
   15989 		i = 20;
   15990 
   15991 		do {
   15992 			delay(5 * 1000);
   15993 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15994 		    && i--);
   15995 
   15996 		delay(30 * 1000);
   15997 	}
   15998 }
   15999 
   16000 static int
   16001 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16002 {
   16003 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16004 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16005 	uint32_t rxa;
   16006 	uint16_t scale = 0, lat_enc = 0;
   16007 	int32_t obff_hwm = 0;
   16008 	int64_t lat_ns, value;
   16009 
   16010 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16011 		device_xname(sc->sc_dev), __func__));
   16012 
   16013 	if (link) {
   16014 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16015 		uint32_t status;
   16016 		uint16_t speed;
   16017 		pcireg_t preg;
   16018 
   16019 		status = CSR_READ(sc, WMREG_STATUS);
   16020 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16021 		case STATUS_SPEED_10:
   16022 			speed = 10;
   16023 			break;
   16024 		case STATUS_SPEED_100:
   16025 			speed = 100;
   16026 			break;
   16027 		case STATUS_SPEED_1000:
   16028 			speed = 1000;
   16029 			break;
   16030 		default:
   16031 			device_printf(sc->sc_dev, "Unknown speed "
   16032 			    "(status = %08x)\n", status);
   16033 			return -1;
   16034 		}
   16035 
   16036 		/* Rx Packet Buffer Allocation size (KB) */
   16037 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16038 
   16039 		/*
   16040 		 * Determine the maximum latency tolerated by the device.
   16041 		 *
   16042 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16043 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16044 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16045 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16046 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16047 		 */
   16048 		lat_ns = ((int64_t)rxa * 1024 -
   16049 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16050 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16051 		if (lat_ns < 0)
   16052 			lat_ns = 0;
   16053 		else
   16054 			lat_ns /= speed;
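		/*
		 * Illustrative units check (hypothetical numbers): rxa is
		 * in KB and speed is in Mb/s, so bytes * 8 * 1000 / (Mb/s)
		 * yields nanoseconds.  With a 26 KB Rx allocation and a
		 * 1500 byte MTU at 1000 Mb/s:
		 * (26 * 1024 - 2 * (1500 + 14)) * 8 * 1000 / 1000 = 188768 ns.
		 */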
   16055 		value = lat_ns;
   16056 
   16057 		while (value > LTRV_VALUE) {
   16058 			scale++;
   16059 			value = howmany(value, __BIT(5));
   16060 		}
   16061 		if (scale > LTRV_SCALE_MAX) {
   16062 			printf("%s: Invalid LTR latency scale %d\n",
   16063 			    device_xname(sc->sc_dev), scale);
   16064 			return -1;
   16065 		}
   16066 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
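		/*
		 * Continuing the hypothetical example above: 188768 > 1023,
		 * so divide by 32 to get 5899 (scale 1); that is still
		 * > 1023, so divide again, rounding up, to get 185
		 * (scale 2).  The encoding scale=2/value=185 represents
		 * 185 * 2^10 = 189440 ns, the smallest encodable latency
		 * that is >= the computed one.
		 */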
   16067 
   16068 		/* Determine the maximum latency tolerated by the platform */
   16069 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16070 		    WM_PCI_LTR_CAP_LPT);
   16071 		max_snoop = preg & 0xffff;
   16072 		max_nosnoop = preg >> 16;
   16073 
   16074 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16075 
   16076 		if (lat_enc > max_ltr_enc) {
   16077 			lat_enc = max_ltr_enc;
   16078 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16079 			    * PCI_LTR_SCALETONS(
   16080 				    __SHIFTOUT(lat_enc,
   16081 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16082 		}
   16083 
   16084 		if (lat_ns) {
   16085 			lat_ns *= speed * 1000;
   16086 			lat_ns /= 8;
   16087 			lat_ns /= 1000000000;
   16088 			obff_hwm = (int32_t)(rxa - lat_ns);
   16089 		}
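		/*
		 * The block above converts the (possibly clamped) latency
		 * back into roughly the number of KB of Rx buffer that
		 * would fill during that time (treating 1 KB as 1000
		 * bytes, as the arithmetic does); the OBFF high water
		 * mark is whatever remains of the allocation.
		 */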
   16090 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   16091 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   16092 			    "(rxa = %d, lat_ns = %d)\n",
   16093 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16094 			return -1;
   16095 		}
   16096 	}
   16097 	/* Snoop and No-Snoop latencies are set to the same value */
   16098 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16099 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16100 
   16101 	/* Set OBFF high water mark */
   16102 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16103 	reg |= obff_hwm;
   16104 	CSR_WRITE(sc, WMREG_SVT, reg);
   16105 
   16106 	/* Enable OBFF */
   16107 	reg = CSR_READ(sc, WMREG_SVCR);
   16108 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16109 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16110 
   16111 	return 0;
   16112 }
   16113 
   16114 /*
   16115  * I210 Errata 25 and I211 Errata 10
   16116  * Slow System Clock.
   16117  */
   16118 static int
   16119 wm_pll_workaround_i210(struct wm_softc *sc)
   16120 {
   16121 	uint32_t mdicnfg, wuc;
   16122 	uint32_t reg;
   16123 	pcireg_t pcireg;
   16124 	uint32_t pmreg;
   16125 	uint16_t nvmword, tmp_nvmword;
   16126 	uint16_t phyval;
   16127 	bool wa_done = false;
   16128 	int i, rv = 0;
   16129 
   16130 	/* Get Power Management cap offset */
   16131 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16132 	    &pmreg, NULL) == 0)
   16133 		return -1;
   16134 
   16135 	/* Save WUC and MDICNFG registers */
   16136 	wuc = CSR_READ(sc, WMREG_WUC);
   16137 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16138 
   16139 	reg = mdicnfg & ~MDICNFG_DEST;
   16140 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16141 
   16142 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16143 		nvmword = INVM_DEFAULT_AL;
   16144 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
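	/*
	 * In the retry loop below, the autoload word with the PLL
	 * workaround bit OR'ed in is loaded via EEARBC_I210 while the
	 * internal PHY is reset and power-cycled through D3/D0, after
	 * which the original word is restored and the PLL state is
	 * checked again.
	 */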
   16145 
   16146 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16147 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16148 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16149 
   16150 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16151 			rv = 0;
   16152 			break; /* OK */
   16153 		} else
   16154 			rv = -1;
   16155 
   16156 		wa_done = true;
   16157 		/* Directly reset the internal PHY */
   16158 		reg = CSR_READ(sc, WMREG_CTRL);
   16159 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16160 
   16161 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16162 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16163 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16164 
   16165 		CSR_WRITE(sc, WMREG_WUC, 0);
   16166 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16167 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16168 
   16169 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16170 		    pmreg + PCI_PMCSR);
   16171 		pcireg |= PCI_PMCSR_STATE_D3;
   16172 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16173 		    pmreg + PCI_PMCSR, pcireg);
   16174 		delay(1000);
   16175 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16176 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16177 		    pmreg + PCI_PMCSR, pcireg);
   16178 
   16179 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16180 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16181 
   16182 		/* Restore WUC register */
   16183 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16184 	}
   16185 
   16186 	/* Restore MDICNFG setting */
   16187 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16188 	if (wa_done)
   16189 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16190 	return rv;
   16191 }
   16192 
   16193 static void
   16194 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16195 {
   16196 	uint32_t reg;
   16197 
   16198 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16199 		device_xname(sc->sc_dev), __func__));
   16200 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16201 	    || (sc->sc_type == WM_T_PCH_CNP));
   16202 
   16203 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16204 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16205 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16206 
   16207 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16208 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16209 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16210 }
   16211