/*	$NetBSD: if_wm.c,v 1.622 2019/01/31 05:48:32 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX multiqueue improvement (refine the queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LEDs correctly (based on the contents of the EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.622 2019/01/31 05:48:32 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
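
/*
 * Usage sketch (illustrative, not from the original source): the second
 * argument of DPRINTF() is a fully parenthesized printf(9) argument
 * list, so the whole call expands to nothing when WM_DEBUG is not
 * defined:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
 *	    device_xname(sc->sc_dev)));
 */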

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * Maximum number of interrupts this driver can use: one interrupt per
 * queue pair, plus one for link state changes.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it; see the illustrative
 * sketch after these definitions.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
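
/*
 * Illustrative sketch only (not used by the driver; the helper name is
 * hypothetical): how the WM_NTXSEGS limit interacts with m_defrag(9).
 * A dmamap created with at most WM_NTXSEGS segments fails to load an
 * overly fragmented mbuf chain with EFBIG; the chain is then compacted
 * and the load retried once.
 */
static inline int
wm_example_load_txbuf(bus_dma_tag_t dmat, bus_dmamap_t dmamap,
    struct mbuf **mp)
{
	struct mbuf *m;
	int error;

	error = bus_dmamap_load_mbuf(dmat, dmamap, *mp,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* More than WM_NTXSEGS DMA segments; compact and retry. */
		m = m_defrag(*mp, M_NOWAIT);
		if (m == NULL)
			return error;
		*mp = m;
		error = bus_dmamap_load_mbuf(dmat, dmamap, *mp,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	}
	return error;
}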

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES); 256 / 5 = 51, which gives us room for about
 * 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
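
/*
 * Example (illustrative): because WM_NRXDESC is a power of two, the
 * bitwise AND with WM_NRXDESC_MASK implements the ring wraparound
 * cheaply, e.g. WM_NEXTRX(255) == (255 + 1) & 0xff == 0.
 */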

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
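
/*
 * Expansion sketch (illustrative): for a queue named "txq" and an
 * event named "tso", WM_Q_EVCNT_DEFINE(txq, tso) declares
 *
 *	char txq_tso_evcnt_name[...];	(sized by the literal placeholder
 *					 string "qname##XX##evname")
 *	struct evcnt txq_ev_tso;
 *
 * and WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, 0, xname) snprintf()s the
 * name "txq00tso" into that buffer and attaches the counter with
 * evcnt_attach_dynamic(9).
 */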

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This queue mediates between them without
	 * blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of this Tx/Rx queue pair */
	int wmq_intr_idx;		/* index into the MSI-X table */

	uint32_t wmq_itr;		/* interrupt interval per queue */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
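
/*
 * Usage sketch (illustrative): PHY and NVM accesses go through these
 * ops so that the chip-specific semaphore is held around the access,
 * following the pattern used by the GMII accessors below:
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		rv = sc->phy.readreg_locked(dev, phy, reg, &val);
 *		sc->phy.release(sc);
 *	}
 */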

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* function number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index into the MSI-X table */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
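
/*
 * Example (illustrative): rxq_tailp always points at the pointer that
 * terminates the chain (initially &rxq_head), so WM_RXCHAIN_LINK()
 * appends an mbuf in O(1) without walking the chain:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, rxq_tail == m2
 */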

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
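
/*
 * Usage sketch (illustrative; CTRL_SLU is the "set link up" bit from
 * if_wmreg.h): a typical read-modify-write of a device register,
 * followed by a status read to flush the posted write:
 *
 *	uint32_t ctrl = CSR_READ(sc, WMREG_CTRL);
 *	CSR_WRITE(sc, WMREG_CTRL, ctrl | CTRL_SLU);
 *	CSR_WRITE_FLUSH(sc);
 */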

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
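
/*
 * Usage sketch (illustrative, mirroring what wm_init_tx_regs() does):
 * the LO/HI macros split the 64-bit physical address of descriptor 0
 * so it can be programmed into the 32-bit base address registers; on
 * platforms with a 32-bit bus_addr_t the high half is simply 0:
 *
 *	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
 */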

/*
 * Register read/write functions, other than CSR_{READ,WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers, not MII
 * registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Reading via EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and reading */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1322 	  "82567LM-3 LAN Controller",
   1323 	  WM_T_ICH10,		WMP_F_COPPER },
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1325 	  "82567LF-3 LAN Controller",
   1326 	  WM_T_ICH10,		WMP_F_COPPER },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1328 	  "82567V-2 LAN Controller",
   1329 	  WM_T_ICH10,		WMP_F_COPPER },
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1331 	  "82567V-3? LAN Controller",
   1332 	  WM_T_ICH10,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1334 	  "HANKSVILLE LAN Controller",
   1335 	  WM_T_ICH10,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1337 	  "PCH LAN (82577LM) Controller",
   1338 	  WM_T_PCH,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1340 	  "PCH LAN (82577LC) Controller",
   1341 	  WM_T_PCH,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1343 	  "PCH LAN (82578DM) Controller",
   1344 	  WM_T_PCH,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1346 	  "PCH LAN (82578DC) Controller",
   1347 	  WM_T_PCH,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1349 	  "PCH2 LAN (82579LM) Controller",
   1350 	  WM_T_PCH2,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1352 	  "PCH2 LAN (82579V) Controller",
   1353 	  WM_T_PCH2,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1355 	  "82575EB dual-1000baseT Ethernet",
   1356 	  WM_T_82575,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1358 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1359 	  WM_T_82575,		WMP_F_SERDES },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1361 	  "82575GB quad-1000baseT Ethernet",
   1362 	  WM_T_82575,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1364 	  "82575GB quad-1000baseT Ethernet (PM)",
   1365 	  WM_T_82575,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1367 	  "82576 1000BaseT Ethernet",
   1368 	  WM_T_82576,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1370 	  "82576 1000BaseX Ethernet",
   1371 	  WM_T_82576,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1374 	  "82576 gigabit Ethernet (SERDES)",
   1375 	  WM_T_82576,		WMP_F_SERDES },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1378 	  "82576 quad-1000BaseT Ethernet",
   1379 	  WM_T_82576,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1382 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1383 	  WM_T_82576,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1386 	  "82576 gigabit Ethernet",
   1387 	  WM_T_82576,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1390 	  "82576 gigabit Ethernet (SERDES)",
   1391 	  WM_T_82576,		WMP_F_SERDES },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1393 	  "82576 quad-gigabit Ethernet (SERDES)",
   1394 	  WM_T_82576,		WMP_F_SERDES },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1397 	  "82580 1000BaseT Ethernet",
   1398 	  WM_T_82580,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1400 	  "82580 1000BaseX Ethernet",
   1401 	  WM_T_82580,		WMP_F_FIBER },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1404 	  "82580 1000BaseT Ethernet (SERDES)",
   1405 	  WM_T_82580,		WMP_F_SERDES },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1408 	  "82580 gigabit Ethernet (SGMII)",
   1409 	  WM_T_82580,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1411 	  "82580 dual-1000BaseT Ethernet",
   1412 	  WM_T_82580,		WMP_F_COPPER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1415 	  "82580 quad-1000BaseX Ethernet",
   1416 	  WM_T_82580,		WMP_F_FIBER },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1419 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1420 	  WM_T_82580,		WMP_F_COPPER },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1423 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1424 	  WM_T_82580,		WMP_F_SERDES },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1427 	  "DH89XXCC 1000BASE-KX Ethernet",
   1428 	  WM_T_82580,		WMP_F_SERDES },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1431 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1432 	  WM_T_82580,		WMP_F_SERDES },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1435 	  "I350 Gigabit Network Connection",
   1436 	  WM_T_I350,		WMP_F_COPPER },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1439 	  "I350 Gigabit Fiber Network Connection",
   1440 	  WM_T_I350,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1443 	  "I350 Gigabit Backplane Connection",
   1444 	  WM_T_I350,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1447 	  "I350 Quad Port Gigabit Ethernet",
   1448 	  WM_T_I350,		WMP_F_SERDES },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1451 	  "I350 Gigabit Connection",
   1452 	  WM_T_I350,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1455 	  "I354 Gigabit Ethernet (KX)",
   1456 	  WM_T_I354,		WMP_F_SERDES },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1459 	  "I354 Gigabit Ethernet (SGMII)",
   1460 	  WM_T_I354,		WMP_F_COPPER },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1463 	  "I354 Gigabit Ethernet (2.5G)",
   1464 	  WM_T_I354,		WMP_F_COPPER },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1467 	  "I210-T1 Ethernet Server Adapter",
   1468 	  WM_T_I210,		WMP_F_COPPER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1471 	  "I210 Ethernet (Copper OEM)",
   1472 	  WM_T_I210,		WMP_F_COPPER },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1475 	  "I210 Ethernet (Copper IT)",
   1476 	  WM_T_I210,		WMP_F_COPPER },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1479 	  "I210 Ethernet (FLASH less)",
   1480 	  WM_T_I210,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1483 	  "I210 Gigabit Ethernet (Fiber)",
   1484 	  WM_T_I210,		WMP_F_FIBER },
   1485 
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1487 	  "I210 Gigabit Ethernet (SERDES)",
   1488 	  WM_T_I210,		WMP_F_SERDES },
   1489 
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1491 	  "I210 Gigabit Ethernet (FLASH less)",
   1492 	  WM_T_I210,		WMP_F_SERDES },
   1493 
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1495 	  "I210 Gigabit Ethernet (SGMII)",
   1496 	  WM_T_I210,		WMP_F_COPPER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1499 	  "I211 Ethernet (COPPER)",
   1500 	  WM_T_I211,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1502 	  "I217 V Ethernet Connection",
   1503 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1505 	  "I217 LM Ethernet Connection",
   1506 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1508 	  "I218 V Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1511 	  "I218 V Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1517 	  "I218 LM Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1520 	  "I218 LM Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1526 	  "I219 V Ethernet Connection",
   1527 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1529 	  "I219 V Ethernet Connection",
   1530 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1532 	  "I219 V Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1535 	  "I219 V Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1538 	  "I219 LM Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1553 	  "I219 V Ethernet Connection",
   1554 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1556 	  "I219 V Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1559 	  "I219 LM Ethernet Connection",
   1560 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1564 	{ 0,			0,
   1565 	  NULL,
   1566 	  0,			0 },
   1567 };
   1568 
   1569 /*
 * Register read/write functions other than CSR_{READ|WRITE}().
   1572  */
   1573 
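/*
 * The I/O BAR is an indirection window: the helpers below first write
 * the target register offset to BAR offset 0 (the address register)
 * and then transfer the value through BAR offset 4 (the data
 * register).
 */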
   1574 #if 0 /* Not currently used */
   1575 static inline uint32_t
   1576 wm_io_read(struct wm_softc *sc, int reg)
   1577 {
   1578 
   1579 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1580 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1581 }
   1582 #endif
   1583 
   1584 static inline void
   1585 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1586 {
   1587 
   1588 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1589 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1590 }
   1591 
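/*
 * Write one 8-bit field of an 82575 indirect control register
 * (an SCTL-style register, judging from the SCTL_CTL_* masks): the
 * data and target offset are packed into a single register write,
 * then the READY bit is polled until the hardware latches the value
 * or the poll times out.
 */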
   1592 static inline void
   1593 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1594     uint32_t data)
   1595 {
   1596 	uint32_t regval;
   1597 	int i;
   1598 
   1599 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1600 
   1601 	CSR_WRITE(sc, reg, regval);
   1602 
   1603 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1604 		delay(5);
   1605 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1606 			break;
   1607 	}
   1608 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1609 		aprint_error("%s: WARNING:"
   1610 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1611 		    device_xname(sc->sc_dev), reg);
   1612 	}
   1613 }
   1614 
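/*
 * Store a bus address into a descriptor address field as two
 * little-endian 32-bit halves.  On platforms with a 32-bit
 * bus_addr_t, the high word is simply zero.
 */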
   1615 static inline void
   1616 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1617 {
   1618 	wa->wa_low = htole32(v & 0xffffffffU);
   1619 	if (sizeof(bus_addr_t) == 8)
   1620 		wa->wa_high = htole32((uint64_t) v >> 32);
   1621 	else
   1622 		wa->wa_high = 0;
   1623 }
   1624 
   1625 /*
   1626  * Descriptor sync/init functions.
   1627  */
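/*
 * wm_cdtxsync() syncs the Tx descriptors [start, start + num).  The
 * DMA map covers the ring as one linear region, so a range that wraps
 * past the end of the ring is synced in two pieces: first the tail of
 * the ring, then the remainder from slot 0.
 */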
   1628 static inline void
   1629 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1630 {
   1631 	struct wm_softc *sc = txq->txq_sc;
   1632 
   1633 	/* If it will wrap around, sync to the end of the ring. */
   1634 	if ((start + num) > WM_NTXDESC(txq)) {
   1635 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1636 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1637 		    (WM_NTXDESC(txq) - start), ops);
   1638 		num -= (WM_NTXDESC(txq) - start);
   1639 		start = 0;
   1640 	}
   1641 
   1642 	/* Now sync whatever is left. */
   1643 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1644 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1645 }
   1646 
   1647 static inline void
   1648 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1649 {
   1650 	struct wm_softc *sc = rxq->rxq_sc;
   1651 
   1652 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1653 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1654 }
   1655 
   1656 static inline void
   1657 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1658 {
   1659 	struct wm_softc *sc = rxq->rxq_sc;
   1660 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1661 	struct mbuf *m = rxs->rxs_mbuf;
   1662 
   1663 	/*
   1664 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1665 	 * so that the payload after the Ethernet header is aligned
   1666 	 * to a 4-byte boundary.
	 *
   1668 	 * XXX BRAINDAMAGE ALERT!
   1669 	 * The stupid chip uses the same size for every buffer, which
   1670 	 * is set in the Receive Control register.  We are using the 2K
   1671 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1672 	 * reason, we can't "scoot" packets longer than the standard
   1673 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1674 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1675 	 * the upper layer copy the headers.
   1676 	 */
   1677 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1678 
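	/*
	 * Three descriptor layouts are handled here: the 82574 extended
	 * format, the "new queue" (82575 and later) format and the
	 * original wiseman format for older chips.  All three take the
	 * same buffer address; only the legacy format needs its status
	 * fields cleared by software.
	 */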
   1679 	if (sc->sc_type == WM_T_82574) {
   1680 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1681 		rxd->erx_data.erxd_addr =
   1682 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1683 		rxd->erx_data.erxd_dd = 0;
   1684 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1685 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1686 
   1687 		rxd->nqrx_data.nrxd_paddr =
   1688 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1689 		/* Currently, split header is not supported. */
   1690 		rxd->nqrx_data.nrxd_haddr = 0;
   1691 	} else {
   1692 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1693 
   1694 		wm_set_dma_addr(&rxd->wrx_addr,
   1695 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1696 		rxd->wrx_len = 0;
   1697 		rxd->wrx_cksum = 0;
   1698 		rxd->wrx_status = 0;
   1699 		rxd->wrx_errors = 0;
   1700 		rxd->wrx_special = 0;
   1701 	}
   1702 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1703 
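	/* Advance the Rx Descriptor Tail so the hardware owns this slot. */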
   1704 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1705 }
   1706 
   1707 /*
   1708  * Device driver interface functions and commonly used functions.
   1709  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1710  */
   1711 
   1712 /* Lookup supported device table */
   1713 static const struct wm_product *
   1714 wm_lookup(const struct pci_attach_args *pa)
   1715 {
   1716 	const struct wm_product *wmp;
   1717 
   1718 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1719 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1720 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1721 			return wmp;
   1722 	}
   1723 	return NULL;
   1724 }
   1725 
   1726 /* The match function (ca_match) */
   1727 static int
   1728 wm_match(device_t parent, cfdata_t cf, void *aux)
   1729 {
   1730 	struct pci_attach_args *pa = aux;
   1731 
   1732 	if (wm_lookup(pa) != NULL)
   1733 		return 1;
   1734 
   1735 	return 0;
   1736 }
   1737 
   1738 /* The attach function (ca_attach) */
   1739 static void
   1740 wm_attach(device_t parent, device_t self, void *aux)
   1741 {
   1742 	struct wm_softc *sc = device_private(self);
   1743 	struct pci_attach_args *pa = aux;
   1744 	prop_dictionary_t dict;
   1745 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1746 	pci_chipset_tag_t pc = pa->pa_pc;
   1747 	int counts[PCI_INTR_TYPE_SIZE];
   1748 	pci_intr_type_t max_type;
   1749 	const char *eetype, *xname;
   1750 	bus_space_tag_t memt;
   1751 	bus_space_handle_t memh;
   1752 	bus_size_t memsize;
   1753 	int memh_valid;
   1754 	int i, error;
   1755 	const struct wm_product *wmp;
   1756 	prop_data_t ea;
   1757 	prop_number_t pn;
   1758 	uint8_t enaddr[ETHER_ADDR_LEN];
   1759 	char buf[256];
   1760 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1761 	pcireg_t preg, memtype;
   1762 	uint16_t eeprom_data, apme_mask;
   1763 	bool force_clear_smbi;
   1764 	uint32_t link_mode;
   1765 	uint32_t reg;
   1766 
   1767 	sc->sc_dev = self;
   1768 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1769 	sc->sc_core_stopping = false;
   1770 
   1771 	wmp = wm_lookup(pa);
   1772 #ifdef DIAGNOSTIC
   1773 	if (wmp == NULL) {
   1774 		printf("\n");
   1775 		panic("wm_attach: impossible");
   1776 	}
   1777 #endif
   1778 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1779 
   1780 	sc->sc_pc = pa->pa_pc;
   1781 	sc->sc_pcitag = pa->pa_tag;
   1782 
   1783 	if (pci_dma64_available(pa))
   1784 		sc->sc_dmat = pa->pa_dmat64;
   1785 	else
   1786 		sc->sc_dmat = pa->pa_dmat;
   1787 
   1788 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1789 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1790 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1791 
   1792 	sc->sc_type = wmp->wmp_type;
   1793 
   1794 	/* Set default function pointers */
   1795 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1796 	sc->phy.release = sc->nvm.release = wm_put_null;
   1797 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1798 
   1799 	if (sc->sc_type < WM_T_82543) {
   1800 		if (sc->sc_rev < 2) {
   1801 			aprint_error_dev(sc->sc_dev,
   1802 			    "i82542 must be at least rev. 2\n");
   1803 			return;
   1804 		}
   1805 		if (sc->sc_rev < 3)
   1806 			sc->sc_type = WM_T_82542_2_0;
   1807 	}
   1808 
   1809 	/*
   1810 	 * Disable MSI for Errata:
   1811 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1812 	 *
   1813 	 *  82544: Errata 25
   1814 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1815 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1816 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1817 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1818 	 *
   1819 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1820 	 *
   1821 	 *  82571 & 82572: Errata 63
   1822 	 */
   1823 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1824 	    || (sc->sc_type == WM_T_82572))
   1825 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1826 
   1827 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1828 	    || (sc->sc_type == WM_T_82580)
   1829 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1830 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1831 		sc->sc_flags |= WM_F_NEWQUEUE;
   1832 
   1833 	/* Set device properties (mactype) */
   1834 	dict = device_properties(sc->sc_dev);
   1835 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1836 
   1837 	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
   1840 	 */
   1841 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1842 	switch (memtype) {
   1843 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1844 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1845 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1846 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1847 		break;
   1848 	default:
   1849 		memh_valid = 0;
   1850 		break;
   1851 	}
   1852 
   1853 	if (memh_valid) {
   1854 		sc->sc_st = memt;
   1855 		sc->sc_sh = memh;
   1856 		sc->sc_ss = memsize;
   1857 	} else {
   1858 		aprint_error_dev(sc->sc_dev,
   1859 		    "unable to map device registers\n");
   1860 		return;
   1861 	}
   1862 
   1863 	/*
   1864 	 * In addition, i82544 and later support I/O mapped indirect
   1865 	 * register access.  It is not desirable (nor supported in
   1866 	 * this driver) to use it for normal operation, though it is
   1867 	 * required to work around bugs in some chip versions.
   1868 	 */
   1869 	if (sc->sc_type >= WM_T_82544) {
   1870 		/* First we have to find the I/O BAR. */
   1871 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1872 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1873 			if (memtype == PCI_MAPREG_TYPE_IO)
   1874 				break;
   1875 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1876 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1877 				i += 4;	/* skip high bits, too */
   1878 		}
   1879 		if (i < PCI_MAPREG_END) {
   1880 			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the
			 * 82580 (and newer?) chips have no
			 * PCI_MAPREG_TYPE_IO.  That's not a problem,
			 * because newer chips don't have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it hasn't been
			 * configured.
   1889 			 */
   1890 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1891 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1892 				aprint_error_dev(sc->sc_dev,
   1893 				    "WARNING: I/O BAR at zero.\n");
   1894 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1895 					0, &sc->sc_iot, &sc->sc_ioh,
   1896 					NULL, &sc->sc_ios) == 0) {
   1897 				sc->sc_flags |= WM_F_IOH_VALID;
   1898 			} else
   1899 				aprint_error_dev(sc->sc_dev,
   1900 				    "WARNING: unable to map I/O space\n");
   1901 		}
   1902 
   1903 	}
   1904 
   1905 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1906 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1907 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1908 	if (sc->sc_type < WM_T_82542_2_1)
   1909 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1910 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1911 
   1912 	/* power up chip */
   1913 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1914 	    && error != EOPNOTSUPP) {
   1915 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1916 		return;
   1917 	}
   1918 
   1919 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1920 	/*
	 * Don't use MSI-X if we can use only one queue, to save
	 * interrupt resources.
   1923 	 */
   1924 	if (sc->sc_nqueues > 1) {
   1925 		max_type = PCI_INTR_TYPE_MSIX;
   1926 		/*
		 * The 82583 has an MSI-X capability in the PCI
		 * configuration space, but it doesn't actually support
		 * it; at least the documentation doesn't say anything
		 * about MSI-X.
   1930 		 */
   1931 		counts[PCI_INTR_TYPE_MSIX]
   1932 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1933 	} else {
   1934 		max_type = PCI_INTR_TYPE_MSI;
   1935 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1936 	}
   1937 
   1938 	/* Allocation settings */
   1939 	counts[PCI_INTR_TYPE_MSI] = 1;
   1940 	counts[PCI_INTR_TYPE_INTX] = 1;
   1941 	/* overridden by disable flags */
   1942 	if (wm_disable_msi != 0) {
   1943 		counts[PCI_INTR_TYPE_MSI] = 0;
   1944 		if (wm_disable_msix != 0) {
   1945 			max_type = PCI_INTR_TYPE_INTX;
   1946 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1947 		}
   1948 	} else if (wm_disable_msix != 0) {
   1949 		max_type = PCI_INTR_TYPE_MSI;
   1950 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1951 	}
   1952 
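	/*
	 * Interrupt allocation falls back in stages: MSI-X first (when
	 * multiqueue is possible), then MSI, then INTx, releasing the
	 * previous allocation before each retry.
	 */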
   1953 alloc_retry:
   1954 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1955 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1956 		return;
   1957 	}
   1958 
   1959 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1960 		error = wm_setup_msix(sc);
   1961 		if (error) {
   1962 			pci_intr_release(pc, sc->sc_intrs,
   1963 			    counts[PCI_INTR_TYPE_MSIX]);
   1964 
   1965 			/* Setup for MSI: Disable MSI-X */
   1966 			max_type = PCI_INTR_TYPE_MSI;
   1967 			counts[PCI_INTR_TYPE_MSI] = 1;
   1968 			counts[PCI_INTR_TYPE_INTX] = 1;
   1969 			goto alloc_retry;
   1970 		}
   1971 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1972 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1973 		error = wm_setup_legacy(sc);
   1974 		if (error) {
   1975 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1976 			    counts[PCI_INTR_TYPE_MSI]);
   1977 
   1978 			/* The next try is for INTx: Disable MSI */
   1979 			max_type = PCI_INTR_TYPE_INTX;
   1980 			counts[PCI_INTR_TYPE_INTX] = 1;
   1981 			goto alloc_retry;
   1982 		}
   1983 	} else {
   1984 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1985 		error = wm_setup_legacy(sc);
   1986 		if (error) {
   1987 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1988 			    counts[PCI_INTR_TYPE_INTX]);
   1989 			return;
   1990 		}
   1991 	}
   1992 
   1993 	/*
   1994 	 * Check the function ID (unit number of the chip).
   1995 	 */
   1996 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1997 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1998 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1999 	    || (sc->sc_type == WM_T_82580)
   2000 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2001 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2002 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2003 	else
   2004 		sc->sc_funcid = 0;
   2005 
   2006 	/*
   2007 	 * Determine a few things about the bus we're connected to.
   2008 	 */
   2009 	if (sc->sc_type < WM_T_82543) {
   2010 		/* We don't really know the bus characteristics here. */
   2011 		sc->sc_bus_speed = 33;
   2012 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2013 		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
   2016 		 */
   2017 		sc->sc_flags |= WM_F_CSA;
   2018 		sc->sc_bus_speed = 66;
   2019 		aprint_verbose_dev(sc->sc_dev,
   2020 		    "Communication Streaming Architecture\n");
   2021 		if (sc->sc_type == WM_T_82547) {
   2022 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2023 			callout_setfunc(&sc->sc_txfifo_ch,
   2024 			    wm_82547_txfifo_stall, sc);
   2025 			aprint_verbose_dev(sc->sc_dev,
   2026 			    "using 82547 Tx FIFO stall work-around\n");
   2027 		}
   2028 	} else if (sc->sc_type >= WM_T_82571) {
   2029 		sc->sc_flags |= WM_F_PCIE;
   2030 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2031 		    && (sc->sc_type != WM_T_ICH10)
   2032 		    && (sc->sc_type != WM_T_PCH)
   2033 		    && (sc->sc_type != WM_T_PCH2)
   2034 		    && (sc->sc_type != WM_T_PCH_LPT)
   2035 		    && (sc->sc_type != WM_T_PCH_SPT)
   2036 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2037 			/* ICH* and PCH* have no PCIe capability registers */
   2038 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2039 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2040 				NULL) == 0)
   2041 				aprint_error_dev(sc->sc_dev,
   2042 				    "unable to find PCIe capability\n");
   2043 		}
   2044 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2045 	} else {
   2046 		reg = CSR_READ(sc, WMREG_STATUS);
   2047 		if (reg & STATUS_BUS64)
   2048 			sc->sc_flags |= WM_F_BUS64;
   2049 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2050 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2051 
   2052 			sc->sc_flags |= WM_F_PCIX;
   2053 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2054 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2055 				aprint_error_dev(sc->sc_dev,
   2056 				    "unable to find PCIX capability\n");
   2057 			else if (sc->sc_type != WM_T_82545_3 &&
   2058 				 sc->sc_type != WM_T_82546_3) {
   2059 				/*
   2060 				 * Work around a problem caused by the BIOS
   2061 				 * setting the max memory read byte count
   2062 				 * incorrectly.
   2063 				 */
   2064 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2065 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2066 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2067 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2068 
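				/*
				 * Both fields encode a power of two,
				 * 512 << n bytes; clamp the command
				 * register's MMRBC to the maximum that
				 * the status register advertises.
				 */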
   2069 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2070 				    PCIX_CMD_BYTECNT_SHIFT;
   2071 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2072 				    PCIX_STATUS_MAXB_SHIFT;
   2073 				if (bytecnt > maxb) {
   2074 					aprint_verbose_dev(sc->sc_dev,
   2075 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2076 					    512 << bytecnt, 512 << maxb);
   2077 					pcix_cmd = (pcix_cmd &
   2078 					    ~PCIX_CMD_BYTECNT_MASK) |
   2079 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2080 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2081 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2082 					    pcix_cmd);
   2083 				}
   2084 			}
   2085 		}
   2086 		/*
   2087 		 * The quad port adapter is special; it has a PCIX-PCIX
   2088 		 * bridge on the board, and can run the secondary bus at
   2089 		 * a higher speed.
   2090 		 */
   2091 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2092 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2093 								      : 66;
   2094 		} else if (sc->sc_flags & WM_F_PCIX) {
   2095 			switch (reg & STATUS_PCIXSPD_MASK) {
   2096 			case STATUS_PCIXSPD_50_66:
   2097 				sc->sc_bus_speed = 66;
   2098 				break;
   2099 			case STATUS_PCIXSPD_66_100:
   2100 				sc->sc_bus_speed = 100;
   2101 				break;
   2102 			case STATUS_PCIXSPD_100_133:
   2103 				sc->sc_bus_speed = 133;
   2104 				break;
   2105 			default:
   2106 				aprint_error_dev(sc->sc_dev,
   2107 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2108 				    reg & STATUS_PCIXSPD_MASK);
   2109 				sc->sc_bus_speed = 66;
   2110 				break;
   2111 			}
   2112 		} else
   2113 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2114 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2115 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2116 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2117 	}
   2118 
   2119 	/* clear interesting stat counters */
   2120 	CSR_READ(sc, WMREG_COLC);
   2121 	CSR_READ(sc, WMREG_RXERRC);
   2122 
   2123 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2124 	    || (sc->sc_type >= WM_T_ICH8))
   2125 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2126 	if (sc->sc_type >= WM_T_ICH8)
   2127 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2128 
   2129 	/* Set PHY, NVM mutex related stuff */
   2130 	switch (sc->sc_type) {
   2131 	case WM_T_82542_2_0:
   2132 	case WM_T_82542_2_1:
   2133 	case WM_T_82543:
   2134 	case WM_T_82544:
   2135 		/* Microwire */
   2136 		sc->nvm.read = wm_nvm_read_uwire;
   2137 		sc->sc_nvm_wordsize = 64;
   2138 		sc->sc_nvm_addrbits = 6;
   2139 		break;
   2140 	case WM_T_82540:
   2141 	case WM_T_82545:
   2142 	case WM_T_82545_3:
   2143 	case WM_T_82546:
   2144 	case WM_T_82546_3:
   2145 		/* Microwire */
   2146 		sc->nvm.read = wm_nvm_read_uwire;
   2147 		reg = CSR_READ(sc, WMREG_EECD);
   2148 		if (reg & EECD_EE_SIZE) {
   2149 			sc->sc_nvm_wordsize = 256;
   2150 			sc->sc_nvm_addrbits = 8;
   2151 		} else {
   2152 			sc->sc_nvm_wordsize = 64;
   2153 			sc->sc_nvm_addrbits = 6;
   2154 		}
   2155 		sc->sc_flags |= WM_F_LOCK_EECD;
   2156 		sc->nvm.acquire = wm_get_eecd;
   2157 		sc->nvm.release = wm_put_eecd;
   2158 		break;
   2159 	case WM_T_82541:
   2160 	case WM_T_82541_2:
   2161 	case WM_T_82547:
   2162 	case WM_T_82547_2:
   2163 		reg = CSR_READ(sc, WMREG_EECD);
   2164 		/*
		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI
		 * interface only on the 8254[17], so set the flags and
		 * functions before calling it.
   2167 		 */
   2168 		sc->sc_flags |= WM_F_LOCK_EECD;
   2169 		sc->nvm.acquire = wm_get_eecd;
   2170 		sc->nvm.release = wm_put_eecd;
   2171 		if (reg & EECD_EE_TYPE) {
   2172 			/* SPI */
   2173 			sc->nvm.read = wm_nvm_read_spi;
   2174 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2175 			wm_nvm_set_addrbits_size_eecd(sc);
   2176 		} else {
   2177 			/* Microwire */
   2178 			sc->nvm.read = wm_nvm_read_uwire;
   2179 			if ((reg & EECD_EE_ABITS) != 0) {
   2180 				sc->sc_nvm_wordsize = 256;
   2181 				sc->sc_nvm_addrbits = 8;
   2182 			} else {
   2183 				sc->sc_nvm_wordsize = 64;
   2184 				sc->sc_nvm_addrbits = 6;
   2185 			}
   2186 		}
   2187 		break;
   2188 	case WM_T_82571:
   2189 	case WM_T_82572:
   2190 		/* SPI */
   2191 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2193 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2194 		wm_nvm_set_addrbits_size_eecd(sc);
   2195 		sc->phy.acquire = wm_get_swsm_semaphore;
   2196 		sc->phy.release = wm_put_swsm_semaphore;
   2197 		sc->nvm.acquire = wm_get_nvm_82571;
   2198 		sc->nvm.release = wm_put_nvm_82571;
   2199 		break;
   2200 	case WM_T_82573:
   2201 	case WM_T_82574:
   2202 	case WM_T_82583:
   2203 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2205 		if (sc->sc_type == WM_T_82573) {
   2206 			sc->phy.acquire = wm_get_swsm_semaphore;
   2207 			sc->phy.release = wm_put_swsm_semaphore;
   2208 			sc->nvm.acquire = wm_get_nvm_82571;
   2209 			sc->nvm.release = wm_put_nvm_82571;
   2210 		} else {
   2211 			/* Both PHY and NVM use the same semaphore. */
   2212 			sc->phy.acquire = sc->nvm.acquire
   2213 			    = wm_get_swfwhw_semaphore;
   2214 			sc->phy.release = sc->nvm.release
   2215 			    = wm_put_swfwhw_semaphore;
   2216 		}
   2217 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2218 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2219 			sc->sc_nvm_wordsize = 2048;
   2220 		} else {
   2221 			/* SPI */
   2222 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2223 			wm_nvm_set_addrbits_size_eecd(sc);
   2224 		}
   2225 		break;
   2226 	case WM_T_82575:
   2227 	case WM_T_82576:
   2228 	case WM_T_82580:
   2229 	case WM_T_I350:
   2230 	case WM_T_I354:
   2231 	case WM_T_80003:
   2232 		/* SPI */
   2233 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2234 		wm_nvm_set_addrbits_size_eecd(sc);
   2235 		if ((sc->sc_type == WM_T_80003)
   2236 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2237 			sc->nvm.read = wm_nvm_read_eerd;
   2238 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2239 		} else {
   2240 			sc->nvm.read = wm_nvm_read_spi;
   2241 			sc->sc_flags |= WM_F_LOCK_EECD;
   2242 		}
   2243 		sc->phy.acquire = wm_get_phy_82575;
   2244 		sc->phy.release = wm_put_phy_82575;
   2245 		sc->nvm.acquire = wm_get_nvm_80003;
   2246 		sc->nvm.release = wm_put_nvm_80003;
   2247 		break;
   2248 	case WM_T_ICH8:
   2249 	case WM_T_ICH9:
   2250 	case WM_T_ICH10:
   2251 	case WM_T_PCH:
   2252 	case WM_T_PCH2:
   2253 	case WM_T_PCH_LPT:
   2254 		sc->nvm.read = wm_nvm_read_ich8;
   2255 		/* FLASH */
   2256 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2257 		sc->sc_nvm_wordsize = 2048;
   2258 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2259 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2260 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2261 			aprint_error_dev(sc->sc_dev,
   2262 			    "can't map FLASH registers\n");
   2263 			goto out;
   2264 		}
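		/*
		 * GFPREG describes the gigabit NVM region in flash
		 * sectors: the low half of the register holds the first
		 * sector, the high half the last.  Convert that to a
		 * byte base, then derive the per-bank size in 16-bit
		 * words (the region holds two banks, hence the division
		 * by 2 * sizeof(uint16_t)).
		 */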
   2265 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2266 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2267 		    ICH_FLASH_SECTOR_SIZE;
   2268 		sc->sc_ich8_flash_bank_size =
   2269 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2270 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2271 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2272 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2273 		sc->sc_flashreg_offset = 0;
   2274 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2275 		sc->phy.release = wm_put_swflag_ich8lan;
   2276 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2277 		sc->nvm.release = wm_put_nvm_ich8lan;
   2278 		break;
   2279 	case WM_T_PCH_SPT:
   2280 	case WM_T_PCH_CNP:
   2281 		sc->nvm.read = wm_nvm_read_spt;
   2282 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2283 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2284 		sc->sc_flasht = sc->sc_st;
   2285 		sc->sc_flashh = sc->sc_sh;
   2286 		sc->sc_ich8_flash_base = 0;
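		/*
		 * Bits 5:1 of the STRAP register hold the NVM size,
		 * less one, in NVM_SIZE_MULTIPLIER-byte units.
		 */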
   2287 		sc->sc_nvm_wordsize =
   2288 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2289 		    * NVM_SIZE_MULTIPLIER;
		/* That is the size in bytes; we want words. */
   2291 		sc->sc_nvm_wordsize /= 2;
   2292 		/* assume 2 banks */
   2293 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2294 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2295 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2296 		sc->phy.release = wm_put_swflag_ich8lan;
   2297 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2298 		sc->nvm.release = wm_put_nvm_ich8lan;
   2299 		break;
   2300 	case WM_T_I210:
   2301 	case WM_T_I211:
		/* Allow a single clear of the SW semaphore on I210 and newer. */
   2303 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2304 		if (wm_nvm_flash_presence_i210(sc)) {
   2305 			sc->nvm.read = wm_nvm_read_eerd;
   2306 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2307 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2308 			wm_nvm_set_addrbits_size_eecd(sc);
   2309 		} else {
   2310 			sc->nvm.read = wm_nvm_read_invm;
   2311 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2312 			sc->sc_nvm_wordsize = INVM_SIZE;
   2313 		}
   2314 		sc->phy.acquire = wm_get_phy_82575;
   2315 		sc->phy.release = wm_put_phy_82575;
   2316 		sc->nvm.acquire = wm_get_nvm_80003;
   2317 		sc->nvm.release = wm_put_nvm_80003;
   2318 		break;
   2319 	default:
   2320 		break;
   2321 	}
   2322 
   2323 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2324 	switch (sc->sc_type) {
   2325 	case WM_T_82571:
   2326 	case WM_T_82572:
   2327 		reg = CSR_READ(sc, WMREG_SWSM2);
   2328 		if ((reg & SWSM2_LOCK) == 0) {
   2329 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2330 			force_clear_smbi = true;
   2331 		} else
   2332 			force_clear_smbi = false;
   2333 		break;
   2334 	case WM_T_82573:
   2335 	case WM_T_82574:
   2336 	case WM_T_82583:
   2337 		force_clear_smbi = true;
   2338 		break;
   2339 	default:
   2340 		force_clear_smbi = false;
   2341 		break;
   2342 	}
   2343 	if (force_clear_smbi) {
   2344 		reg = CSR_READ(sc, WMREG_SWSM);
   2345 		if ((reg & SWSM_SMBI) != 0)
   2346 			aprint_error_dev(sc->sc_dev,
   2347 			    "Please update the Bootagent\n");
   2348 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2349 	}
   2350 
   2351 	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
   2353 	 * This allows the EEPROM type to be printed correctly in the case
   2354 	 * that no EEPROM is attached.
   2355 	 */
   2356 	/*
   2357 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2358 	 * this for later, so we can fail future reads from the EEPROM.
   2359 	 */
   2360 	if (wm_nvm_validate_checksum(sc)) {
   2361 		/*
		 * Check again, because some PCIe parts fail the first
		 * check due to the link being in a sleep state.
   2364 		 */
   2365 		if (wm_nvm_validate_checksum(sc))
   2366 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2367 	}
   2368 
   2369 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2370 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2371 	else {
   2372 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2373 		    sc->sc_nvm_wordsize);
   2374 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2375 			aprint_verbose("iNVM");
   2376 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2377 			aprint_verbose("FLASH(HW)");
   2378 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2379 			aprint_verbose("FLASH");
   2380 		else {
   2381 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2382 				eetype = "SPI";
   2383 			else
   2384 				eetype = "MicroWire";
   2385 			aprint_verbose("(%d address bits) %s EEPROM",
   2386 			    sc->sc_nvm_addrbits, eetype);
   2387 		}
   2388 	}
   2389 	wm_nvm_version(sc);
   2390 	aprint_verbose("\n");
   2391 
   2392 	/*
   2393 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2394 	 * incorrect.
   2395 	 */
   2396 	wm_gmii_setup_phytype(sc, 0, 0);
   2397 
   2398 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2399 	switch (sc->sc_type) {
   2400 	case WM_T_ICH8:
   2401 	case WM_T_ICH9:
   2402 	case WM_T_ICH10:
   2403 	case WM_T_PCH:
   2404 	case WM_T_PCH2:
   2405 	case WM_T_PCH_LPT:
   2406 	case WM_T_PCH_SPT:
   2407 	case WM_T_PCH_CNP:
   2408 		apme_mask = WUC_APME;
   2409 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2410 		if ((eeprom_data & apme_mask) != 0)
   2411 			sc->sc_flags |= WM_F_WOL;
   2412 		break;
   2413 	default:
   2414 		break;
   2415 	}
   2416 
   2417 	/* Reset the chip to a known state. */
   2418 	wm_reset(sc);
   2419 
   2420 	/*
   2421 	 * Check for I21[01] PLL workaround.
   2422 	 *
   2423 	 * Three cases:
   2424 	 * a) Chip is I211.
   2425 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2426 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2427 	 */
   2428 	if (sc->sc_type == WM_T_I211)
   2429 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2430 	if (sc->sc_type == WM_T_I210) {
   2431 		if (!wm_nvm_flash_presence_i210(sc))
   2432 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2433 		else if ((sc->sc_nvm_ver_major < 3)
   2434 		    || ((sc->sc_nvm_ver_major == 3)
   2435 			&& (sc->sc_nvm_ver_minor < 25))) {
   2436 			aprint_verbose_dev(sc->sc_dev,
   2437 			    "ROM image version %d.%d is older than 3.25\n",
   2438 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2439 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2440 		}
   2441 	}
   2442 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2443 		wm_pll_workaround_i210(sc);
   2444 
   2445 	wm_get_wakeup(sc);
   2446 
   2447 	/* Non-AMT based hardware can now take control from firmware */
   2448 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2449 		wm_get_hw_control(sc);
   2450 
   2451 	/*
   2452 	 * Read the Ethernet address from the EEPROM, if not first found
   2453 	 * in device properties.
   2454 	 */
   2455 	ea = prop_dictionary_get(dict, "mac-address");
   2456 	if (ea != NULL) {
   2457 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2458 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2459 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2460 	} else {
   2461 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2462 			aprint_error_dev(sc->sc_dev,
   2463 			    "unable to read Ethernet address\n");
   2464 			goto out;
   2465 		}
   2466 	}
   2467 
   2468 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2469 	    ether_sprintf(enaddr));
   2470 
   2471 	/*
   2472 	 * Read the config info from the EEPROM, and set up various
   2473 	 * bits in the control registers based on their contents.
   2474 	 */
   2475 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2476 	if (pn != NULL) {
   2477 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2478 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2479 	} else {
   2480 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2481 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2482 			goto out;
   2483 		}
   2484 	}
   2485 
   2486 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2487 	if (pn != NULL) {
   2488 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2489 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2490 	} else {
   2491 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2492 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2493 			goto out;
   2494 		}
   2495 	}
   2496 
   2497 	/* check for WM_F_WOL */
   2498 	switch (sc->sc_type) {
   2499 	case WM_T_82542_2_0:
   2500 	case WM_T_82542_2_1:
   2501 	case WM_T_82543:
   2502 		/* dummy? */
   2503 		eeprom_data = 0;
   2504 		apme_mask = NVM_CFG3_APME;
   2505 		break;
   2506 	case WM_T_82544:
   2507 		apme_mask = NVM_CFG2_82544_APM_EN;
   2508 		eeprom_data = cfg2;
   2509 		break;
   2510 	case WM_T_82546:
   2511 	case WM_T_82546_3:
   2512 	case WM_T_82571:
   2513 	case WM_T_82572:
   2514 	case WM_T_82573:
   2515 	case WM_T_82574:
   2516 	case WM_T_82583:
   2517 	case WM_T_80003:
   2518 	case WM_T_82575:
   2519 	case WM_T_82576:
   2520 		apme_mask = NVM_CFG3_APME;
   2521 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2522 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2523 		break;
   2524 	case WM_T_82580:
   2525 	case WM_T_I350:
   2526 	case WM_T_I354:
   2527 	case WM_T_I210:
   2528 	case WM_T_I211:
   2529 		apme_mask = NVM_CFG3_APME;
   2530 		wm_nvm_read(sc,
   2531 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2532 		    1, &eeprom_data);
   2533 		break;
   2534 	case WM_T_ICH8:
   2535 	case WM_T_ICH9:
   2536 	case WM_T_ICH10:
   2537 	case WM_T_PCH:
   2538 	case WM_T_PCH2:
   2539 	case WM_T_PCH_LPT:
   2540 	case WM_T_PCH_SPT:
   2541 	case WM_T_PCH_CNP:
		/* Already checked before wm_reset() */
   2543 		apme_mask = eeprom_data = 0;
   2544 		break;
   2545 	default: /* XXX 82540 */
   2546 		apme_mask = NVM_CFG3_APME;
   2547 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2548 		break;
   2549 	}
   2550 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2551 	if ((eeprom_data & apme_mask) != 0)
   2552 		sc->sc_flags |= WM_F_WOL;
   2553 
   2554 	/*
	 * We have the EEPROM settings; now apply the special cases
	 * where the EEPROM may be wrong or the board simply won't
	 * support wake-on-LAN on a particular port.
   2558 	 */
   2559 	switch (sc->sc_pcidevid) {
   2560 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2561 		sc->sc_flags &= ~WM_F_WOL;
   2562 		break;
   2563 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2564 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2565 		/* Wake events only supported on port A for dual fiber
   2566 		 * regardless of eeprom setting */
   2567 		if (sc->sc_funcid == 1)
   2568 			sc->sc_flags &= ~WM_F_WOL;
   2569 		break;
   2570 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2571 		/* if quad port adapter, disable WoL on all but port A */
   2572 		if (sc->sc_funcid != 0)
   2573 			sc->sc_flags &= ~WM_F_WOL;
   2574 		break;
   2575 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2576 		/* Wake events only supported on port A for dual fiber
   2577 		 * regardless of eeprom setting */
   2578 		if (sc->sc_funcid == 1)
   2579 			sc->sc_flags &= ~WM_F_WOL;
   2580 		break;
   2581 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2582 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2583 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2584 		/* if quad port adapter, disable WoL on all but port A */
   2585 		if (sc->sc_funcid != 0)
   2586 			sc->sc_flags &= ~WM_F_WOL;
   2587 		break;
   2588 	}
   2589 
   2590 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2591 		/* Check NVM for autonegotiation */
   2592 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2593 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2594 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2595 		}
   2596 	}
   2597 
   2598 	/*
	 * XXX Some multi-port cards need special handling to disable
	 * a particular port.
   2601 	 */
   2602 
   2603 	if (sc->sc_type >= WM_T_82544) {
   2604 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2605 		if (pn != NULL) {
   2606 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2607 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2608 		} else {
   2609 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2610 				aprint_error_dev(sc->sc_dev,
   2611 				    "unable to read SWDPIN\n");
   2612 				goto out;
   2613 			}
   2614 		}
   2615 	}
   2616 
   2617 	if (cfg1 & NVM_CFG1_ILOS)
   2618 		sc->sc_ctrl |= CTRL_ILOS;
   2619 
   2620 	/*
   2621 	 * XXX
	 * This code isn't correct because pins 2 and 3 are located in
	 * different positions on newer chips.  Check all the datasheets.
	 *
	 * Until this is resolved, only apply it to chips <= 82580.
   2626 	 */
   2627 	if (sc->sc_type <= WM_T_82580) {
   2628 		if (sc->sc_type >= WM_T_82544) {
   2629 			sc->sc_ctrl |=
   2630 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2631 			    CTRL_SWDPIO_SHIFT;
   2632 			sc->sc_ctrl |=
   2633 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2634 			    CTRL_SWDPINS_SHIFT;
   2635 		} else {
   2636 			sc->sc_ctrl |=
   2637 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2638 			    CTRL_SWDPIO_SHIFT;
   2639 		}
   2640 	}
   2641 
   2642 	/* XXX For other than 82580? */
   2643 	if (sc->sc_type == WM_T_82580) {
   2644 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2645 		if (nvmword & __BIT(13))
   2646 			sc->sc_ctrl |= CTRL_ILOS;
   2647 	}
   2648 
   2649 #if 0
   2650 	if (sc->sc_type >= WM_T_82544) {
   2651 		if (cfg1 & NVM_CFG1_IPS0)
   2652 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2653 		if (cfg1 & NVM_CFG1_IPS1)
   2654 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2655 		sc->sc_ctrl_ext |=
   2656 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2657 		    CTRL_EXT_SWDPIO_SHIFT;
   2658 		sc->sc_ctrl_ext |=
   2659 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2660 		    CTRL_EXT_SWDPINS_SHIFT;
   2661 	} else {
   2662 		sc->sc_ctrl_ext |=
   2663 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2664 		    CTRL_EXT_SWDPIO_SHIFT;
   2665 	}
   2666 #endif
   2667 
   2668 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2669 #if 0
   2670 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2671 #endif
   2672 
   2673 	if (sc->sc_type == WM_T_PCH) {
   2674 		uint16_t val;
   2675 
   2676 		/* Save the NVM K1 bit setting */
   2677 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2678 
   2679 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2680 			sc->sc_nvm_k1_enabled = 1;
   2681 		else
   2682 			sc->sc_nvm_k1_enabled = 0;
   2683 	}
   2684 
   2685 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2686 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2687 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2688 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2689 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2690 	    || sc->sc_type == WM_T_82573
   2691 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2692 		/* Copper only */
   2693 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2697 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2698 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2699 		switch (link_mode) {
   2700 		case CTRL_EXT_LINK_MODE_1000KX:
   2701 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2702 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2703 			break;
   2704 		case CTRL_EXT_LINK_MODE_SGMII:
   2705 			if (wm_sgmii_uses_mdio(sc)) {
   2706 				aprint_verbose_dev(sc->sc_dev,
   2707 				    "SGMII(MDIO)\n");
   2708 				sc->sc_flags |= WM_F_SGMII;
   2709 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2710 				break;
   2711 			}
   2712 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2713 			/*FALLTHROUGH*/
   2714 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2715 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2716 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2717 				if (link_mode
   2718 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2719 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2720 					sc->sc_flags |= WM_F_SGMII;
   2721 				} else {
   2722 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2723 					aprint_verbose_dev(sc->sc_dev,
   2724 					    "SERDES\n");
   2725 				}
   2726 				break;
   2727 			}
   2728 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2729 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2730 
   2731 			/* Change current link mode setting */
   2732 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2733 			switch (sc->sc_mediatype) {
   2734 			case WM_MEDIATYPE_COPPER:
   2735 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2736 				break;
   2737 			case WM_MEDIATYPE_SERDES:
   2738 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2739 				break;
   2740 			default:
   2741 				break;
   2742 			}
   2743 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2744 			break;
   2745 		case CTRL_EXT_LINK_MODE_GMII:
   2746 		default:
   2747 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2748 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2749 			break;
   2750 		}
   2751 
    2752 		/* Enable the I2C interface only when SGMII uses it */
    2753 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2754 			reg |= CTRL_EXT_I2C_ENA;
    2755 		else
    2756 			reg &= ~CTRL_EXT_I2C_ENA;
   2757 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2758 	} else if (sc->sc_type < WM_T_82543 ||
   2759 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2760 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2761 			aprint_error_dev(sc->sc_dev,
   2762 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2763 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2764 		}
   2765 	} else {
   2766 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2767 			aprint_error_dev(sc->sc_dev,
   2768 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2769 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2770 		}
   2771 	}
   2772 
   2773 	if (sc->sc_type >= WM_T_PCH2)
   2774 		sc->sc_flags |= WM_F_EEE;
   2775 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2776 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2777 		/* XXX: Need special handling for I354. (not yet) */
   2778 		if (sc->sc_type != WM_T_I354)
   2779 			sc->sc_flags |= WM_F_EEE;
   2780 	}
   2781 
   2782 	/* Set device properties (macflags) */
   2783 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2784 
   2785 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2786 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2787 
   2788 	/* Initialize the media structures accordingly. */
   2789 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2790 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2791 	else
   2792 		wm_tbi_mediainit(sc); /* All others */
   2793 
   2794 	ifp = &sc->sc_ethercom.ec_if;
   2795 	xname = device_xname(sc->sc_dev);
   2796 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2797 	ifp->if_softc = sc;
   2798 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2799 #ifdef WM_MPSAFE
   2800 	ifp->if_extflags = IFEF_MPSAFE;
   2801 #endif
   2802 	ifp->if_ioctl = wm_ioctl;
   2803 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2804 		ifp->if_start = wm_nq_start;
   2805 		/*
    2806 		 * When the number of CPUs is one and the controller can use
    2807 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2808 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2809 		 * other for link status changes.
    2810 		 * In this situation, wm_nq_transmit() is disadvantageous
    2811 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2812 		 */
   2813 		if (wm_is_using_multiqueue(sc))
   2814 			ifp->if_transmit = wm_nq_transmit;
   2815 	} else {
   2816 		ifp->if_start = wm_start;
   2817 		/*
    2818 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2819 		 */
   2820 		if (wm_is_using_multiqueue(sc))
   2821 			ifp->if_transmit = wm_transmit;
   2822 	}
    2823 	/* wm(4) does not use ifp->if_watchdog; wm_tick is used as the watchdog. */
   2824 	ifp->if_init = wm_init;
   2825 	ifp->if_stop = wm_stop;
   2826 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2827 	IFQ_SET_READY(&ifp->if_snd);
   2828 
   2829 	/* Check for jumbo frame */
   2830 	switch (sc->sc_type) {
   2831 	case WM_T_82573:
   2832 		/* XXX limited to 9234 if ASPM is disabled */
   2833 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2834 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2835 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2836 		break;
   2837 	case WM_T_82571:
   2838 	case WM_T_82572:
   2839 	case WM_T_82574:
   2840 	case WM_T_82583:
   2841 	case WM_T_82575:
   2842 	case WM_T_82576:
   2843 	case WM_T_82580:
   2844 	case WM_T_I350:
   2845 	case WM_T_I354:
   2846 	case WM_T_I210:
   2847 	case WM_T_I211:
   2848 	case WM_T_80003:
   2849 	case WM_T_ICH9:
   2850 	case WM_T_ICH10:
   2851 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2852 	case WM_T_PCH_LPT:
   2853 	case WM_T_PCH_SPT:
   2854 	case WM_T_PCH_CNP:
   2855 		/* XXX limited to 9234 */
   2856 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2857 		break;
   2858 	case WM_T_PCH:
   2859 		/* XXX limited to 4096 */
   2860 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2861 		break;
   2862 	case WM_T_82542_2_0:
   2863 	case WM_T_82542_2_1:
   2864 	case WM_T_ICH8:
   2865 		/* No support for jumbo frame */
   2866 		break;
   2867 	default:
   2868 		/* ETHER_MAX_LEN_JUMBO */
   2869 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2870 		break;
   2871 	}
   2872 
   2873 	/* If we're a i82543 or greater, we can support VLANs. */
   2874 	if (sc->sc_type >= WM_T_82543)
   2875 		sc->sc_ethercom.ec_capabilities |=
   2876 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2877 
   2878 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2879 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2880 
   2881 	/*
    2882 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2883 	 * on i82543 and later.
   2884 	 */
   2885 	if (sc->sc_type >= WM_T_82543) {
   2886 		ifp->if_capabilities |=
   2887 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2888 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2889 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2890 		    IFCAP_CSUM_TCPv6_Tx |
   2891 		    IFCAP_CSUM_UDPv6_Tx;
   2892 	}
   2893 
   2894 	/*
   2895 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2896 	 *
   2897 	 *	82541GI (8086:1076) ... no
   2898 	 *	82572EI (8086:10b9) ... yes
   2899 	 */
   2900 	if (sc->sc_type >= WM_T_82571) {
   2901 		ifp->if_capabilities |=
   2902 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2903 	}
   2904 
   2905 	/*
   2906 	 * If we're a i82544 or greater (except i82547), we can do
   2907 	 * TCP segmentation offload.
   2908 	 */
   2909 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2910 		ifp->if_capabilities |= IFCAP_TSOv4;
   2911 	}
   2912 
   2913 	if (sc->sc_type >= WM_T_82571) {
   2914 		ifp->if_capabilities |= IFCAP_TSOv6;
   2915 	}
   2916 
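         	/*
         	 * Default descriptor-processing budgets: the *_intr_* limits
         	 * bound the work done in hard-interrupt context, the others
         	 * bound the deferred (softint) processing.
         	 */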
   2917 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2918 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2919 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2920 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2921 
   2922 #ifdef WM_MPSAFE
   2923 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2924 #else
   2925 	sc->sc_core_lock = NULL;
   2926 #endif
   2927 
   2928 	/* Attach the interface. */
   2929 	error = if_initialize(ifp);
   2930 	if (error != 0) {
   2931 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2932 		    error);
   2933 		return; /* Error */
   2934 	}
   2935 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2936 	ether_ifattach(ifp, enaddr);
   2937 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2938 	if_register(ifp);
   2939 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2940 	    RND_FLAG_DEFAULT);
   2941 
   2942 #ifdef WM_EVENT_COUNTERS
   2943 	/* Attach event counters. */
   2944 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2945 	    NULL, xname, "linkintr");
   2946 
   2947 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2948 	    NULL, xname, "tx_xoff");
   2949 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2950 	    NULL, xname, "tx_xon");
   2951 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2952 	    NULL, xname, "rx_xoff");
   2953 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2954 	    NULL, xname, "rx_xon");
   2955 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2956 	    NULL, xname, "rx_macctl");
   2957 #endif /* WM_EVENT_COUNTERS */
   2958 
   2959 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2960 		pmf_class_network_register(self, ifp);
   2961 	else
   2962 		aprint_error_dev(self, "couldn't establish power handler\n");
   2963 
   2964 	sc->sc_flags |= WM_F_ATTACHED;
   2965 out:
   2966 	return;
   2967 }
   2968 
   2969 /* The detach function (ca_detach) */
   2970 static int
   2971 wm_detach(device_t self, int flags __unused)
   2972 {
   2973 	struct wm_softc *sc = device_private(self);
   2974 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2975 	int i;
   2976 
   2977 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2978 		return 0;
   2979 
   2980 	/* Stop the interface. Callouts are stopped in it. */
   2981 	wm_stop(ifp, 1);
   2982 
   2983 	pmf_device_deregister(self);
   2984 
   2985 #ifdef WM_EVENT_COUNTERS
   2986 	evcnt_detach(&sc->sc_ev_linkintr);
   2987 
   2988 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2989 	evcnt_detach(&sc->sc_ev_tx_xon);
   2990 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2991 	evcnt_detach(&sc->sc_ev_rx_xon);
   2992 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2993 #endif /* WM_EVENT_COUNTERS */
   2994 
   2995 	/* Tell the firmware about the release */
   2996 	WM_CORE_LOCK(sc);
   2997 	wm_release_manageability(sc);
   2998 	wm_release_hw_control(sc);
   2999 	wm_enable_wakeup(sc);
   3000 	WM_CORE_UNLOCK(sc);
   3001 
   3002 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3003 
   3004 	/* Delete all remaining media. */
   3005 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3006 
   3007 	ether_ifdetach(ifp);
   3008 	if_detach(ifp);
   3009 	if_percpuq_destroy(sc->sc_ipq);
   3010 
   3011 	/* Unload RX dmamaps and free mbufs */
   3012 	for (i = 0; i < sc->sc_nqueues; i++) {
   3013 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3014 		mutex_enter(rxq->rxq_lock);
   3015 		wm_rxdrain(rxq);
   3016 		mutex_exit(rxq->rxq_lock);
   3017 	}
   3018 	/* Must unlock here */
   3019 
   3020 	/* Disestablish the interrupt handler */
   3021 	for (i = 0; i < sc->sc_nintrs; i++) {
   3022 		if (sc->sc_ihs[i] != NULL) {
   3023 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3024 			sc->sc_ihs[i] = NULL;
   3025 		}
   3026 	}
   3027 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3028 
   3029 	wm_free_txrx_queues(sc);
   3030 
   3031 	/* Unmap the registers */
   3032 	if (sc->sc_ss) {
   3033 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3034 		sc->sc_ss = 0;
   3035 	}
   3036 	if (sc->sc_ios) {
   3037 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3038 		sc->sc_ios = 0;
   3039 	}
   3040 	if (sc->sc_flashs) {
   3041 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3042 		sc->sc_flashs = 0;
   3043 	}
   3044 
   3045 	if (sc->sc_core_lock)
   3046 		mutex_obj_free(sc->sc_core_lock);
   3047 	if (sc->sc_ich_phymtx)
   3048 		mutex_obj_free(sc->sc_ich_phymtx);
   3049 	if (sc->sc_ich_nvmmtx)
   3050 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3051 
   3052 	return 0;
   3053 }
   3054 
   3055 static bool
   3056 wm_suspend(device_t self, const pmf_qual_t *qual)
   3057 {
   3058 	struct wm_softc *sc = device_private(self);
   3059 
   3060 	wm_release_manageability(sc);
   3061 	wm_release_hw_control(sc);
   3062 	wm_enable_wakeup(sc);
   3063 
   3064 	return true;
   3065 }
   3066 
   3067 static bool
   3068 wm_resume(device_t self, const pmf_qual_t *qual)
   3069 {
   3070 	struct wm_softc *sc = device_private(self);
   3071 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3072 	pcireg_t reg;
   3073 	char buf[256];
   3074 
   3075 	reg = CSR_READ(sc, WMREG_WUS);
   3076 	if (reg != 0) {
   3077 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3078 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3079 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3080 	}
   3081 
   3082 	if (sc->sc_type >= WM_T_PCH2)
   3083 		wm_resume_workarounds_pchlan(sc);
   3084 	if ((ifp->if_flags & IFF_UP) == 0) {
   3085 		wm_reset(sc);
   3086 		/* Non-AMT based hardware can now take control from firmware */
   3087 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3088 			wm_get_hw_control(sc);
   3089 		wm_init_manageability(sc);
   3090 	} else {
   3091 		/*
    3092 		 * We called pmf_class_network_register(), so if_init() is
    3093 		 * called automatically when the interface is brought up
    3094 		 * (IFF_UP); wm_reset(), wm_get_hw_control() and
    3095 		 * wm_init_manageability() are then called via wm_init().
   3096 		 */
   3097 	}
   3098 
   3099 	return true;
   3100 }
   3101 
   3102 /*
   3103  * wm_watchdog:		[ifnet interface function]
   3104  *
   3105  *	Watchdog timer handler.
   3106  */
   3107 static void
   3108 wm_watchdog(struct ifnet *ifp)
   3109 {
   3110 	int qid;
   3111 	struct wm_softc *sc = ifp->if_softc;
    3112 	uint16_t hang_queue = 0; /* Max number of queues is 16 (82576), so 16 bits suffice. */
   3113 
   3114 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3115 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3116 
   3117 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3118 	}
   3119 
   3120 	/*
    3121 	 * If any queue hung, reset the interface.
   3122 	 */
   3123 	if (hang_queue != 0) {
   3124 		(void) wm_init(ifp);
   3125 
   3126 		/*
    3127 		 * There is still some upper-layer processing that calls
    3128 		 * ifp->if_start(), e.g. ALTQ or single-CPU systems.
   3129 		 */
   3130 		/* Try to get more packets going. */
   3131 		ifp->if_start(ifp);
   3132 	}
   3133 }
   3134 
   3135 
   3136 static void
   3137 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3138 {
   3139 
   3140 	mutex_enter(txq->txq_lock);
   3141 	if (txq->txq_sending &&
   3142 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3143 		wm_watchdog_txq_locked(ifp, txq, hang);
   3144 	}
   3145 	mutex_exit(txq->txq_lock);
   3146 }
   3147 
   3148 static void
   3149 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3150     uint16_t *hang)
   3151 {
   3152 	struct wm_softc *sc = ifp->if_softc;
   3153 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3154 
   3155 	KASSERT(mutex_owned(txq->txq_lock));
   3156 
   3157 	/*
   3158 	 * Since we're using delayed interrupts, sweep up
   3159 	 * before we report an error.
   3160 	 */
   3161 	wm_txeof(txq, UINT_MAX);
   3162 
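         	/* If descriptors are still outstanding, mark this queue hung. */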
   3163 	if (txq->txq_sending)
   3164 		*hang |= __BIT(wmq->wmq_id);
   3165 
   3166 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3167 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3168 		    device_xname(sc->sc_dev));
   3169 	} else {
   3170 #ifdef WM_DEBUG
   3171 		int i, j;
   3172 		struct wm_txsoft *txs;
   3173 #endif
   3174 		log(LOG_ERR,
   3175 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3176 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3177 		    txq->txq_next);
   3178 		ifp->if_oerrors++;
   3179 #ifdef WM_DEBUG
   3180 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3181 		    i = WM_NEXTTXS(txq, i)) {
   3182 		    txs = &txq->txq_soft[i];
   3183 		    printf("txs %d tx %d -> %d\n",
   3184 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3185 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3186 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3187 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3188 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3189 				    printf("\t %#08x%08x\n",
   3190 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3191 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3192 			    } else {
   3193 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3194 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3195 					txq->txq_descs[j].wtx_addr.wa_low);
   3196 				    printf("\t %#04x%02x%02x%08x\n",
   3197 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3198 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3199 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3200 					txq->txq_descs[j].wtx_cmdlen);
   3201 			    }
   3202 			if (j == txs->txs_lastdesc)
   3203 				break;
   3204 			}
   3205 		}
   3206 #endif
   3207 	}
   3208 }
   3209 
   3210 /*
   3211  * wm_tick:
   3212  *
   3213  *	One second timer, used to check link status, sweep up
   3214  *	completed transmit jobs, etc.
   3215  */
   3216 static void
   3217 wm_tick(void *arg)
   3218 {
   3219 	struct wm_softc *sc = arg;
   3220 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3221 #ifndef WM_MPSAFE
   3222 	int s = splnet();
   3223 #endif
   3224 
   3225 	WM_CORE_LOCK(sc);
   3226 
   3227 	if (sc->sc_core_stopping) {
   3228 		WM_CORE_UNLOCK(sc);
   3229 #ifndef WM_MPSAFE
   3230 		splx(s);
   3231 #endif
   3232 		return;
   3233 	}
   3234 
   3235 	if (sc->sc_type >= WM_T_82542_2_1) {
   3236 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3237 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3238 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3239 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3240 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3241 	}
   3242 
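         	/* Fold the MAC statistics registers into the ifnet counters. */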
   3243 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3244 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3245 	    + CSR_READ(sc, WMREG_CRCERRS)
   3246 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3247 	    + CSR_READ(sc, WMREG_SYMERRC)
   3248 	    + CSR_READ(sc, WMREG_RXERRC)
   3249 	    + CSR_READ(sc, WMREG_SEC)
   3250 	    + CSR_READ(sc, WMREG_CEXTERR)
   3251 	    + CSR_READ(sc, WMREG_RLEC);
   3252 	/*
    3253 	 * WMREG_RNBC is incremented when there are no available buffers in
    3254 	 * host memory. It is not the number of dropped packets: the ethernet
    3255 	 * controller can still receive packets in that case as long as there
    3256 	 * is space in the PHY's FIFO.
    3257 	 *
    3258 	 * If you want to track WMREG_RNBC, use your own EVCNT instead of
    3259 	 * if_iqdrops.
   3260 	 */
   3261 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3262 
   3263 	if (sc->sc_flags & WM_F_HAS_MII)
   3264 		mii_tick(&sc->sc_mii);
   3265 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3266 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3267 		wm_serdes_tick(sc);
   3268 	else
   3269 		wm_tbi_tick(sc);
   3270 
   3271 	WM_CORE_UNLOCK(sc);
   3272 
   3273 	wm_watchdog(ifp);
   3274 
   3275 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3276 }
   3277 
   3278 static int
   3279 wm_ifflags_cb(struct ethercom *ec)
   3280 {
   3281 	struct ifnet *ifp = &ec->ec_if;
   3282 	struct wm_softc *sc = ifp->if_softc;
   3283 	int iffchange, ecchange;
   3284 	bool needreset = false;
   3285 	int rc = 0;
   3286 
   3287 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3288 		device_xname(sc->sc_dev), __func__));
   3289 
   3290 	WM_CORE_LOCK(sc);
   3291 
   3292 	/*
   3293 	 * Check for if_flags.
   3294 	 * Main usage is to prevent linkdown when opening bpf.
   3295 	 */
   3296 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3297 	sc->sc_if_flags = ifp->if_flags;
   3298 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3299 		needreset = true;
   3300 		goto ec;
   3301 	}
   3302 
   3303 	/* iff related updates */
   3304 	if ((iffchange & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3305 		wm_set_filter(sc);
   3306 
   3307 	wm_set_vlan(sc);
   3308 
   3309 ec:
   3310 	/* Check for ec_capenable. */
   3311 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3312 	sc->sc_ec_capenable = ec->ec_capenable;
   3313 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3314 		needreset = true;
   3315 		goto out;
   3316 	}
   3317 
   3318 	/* ec related updates */
   3319 	wm_set_eee(sc);
   3320 
   3321 out:
   3322 	if (needreset)
   3323 		rc = ENETRESET;
   3324 	WM_CORE_UNLOCK(sc);
   3325 
   3326 	return rc;
   3327 }
   3328 
   3329 /*
   3330  * wm_ioctl:		[ifnet interface function]
   3331  *
   3332  *	Handle control requests from the operator.
   3333  */
   3334 static int
   3335 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3336 {
   3337 	struct wm_softc *sc = ifp->if_softc;
   3338 	struct ifreq *ifr = (struct ifreq *) data;
   3339 	struct ifaddr *ifa = (struct ifaddr *)data;
   3340 	struct sockaddr_dl *sdl;
   3341 	int s, error;
   3342 
   3343 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3344 		device_xname(sc->sc_dev), __func__));
   3345 
   3346 #ifndef WM_MPSAFE
   3347 	s = splnet();
   3348 #endif
   3349 	switch (cmd) {
   3350 	case SIOCSIFMEDIA:
   3351 	case SIOCGIFMEDIA:
   3352 		WM_CORE_LOCK(sc);
   3353 		/* Flow control requires full-duplex mode. */
   3354 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3355 		    (ifr->ifr_media & IFM_FDX) == 0)
   3356 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3357 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3358 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3359 				/* We can do both TXPAUSE and RXPAUSE. */
   3360 				ifr->ifr_media |=
   3361 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3362 			}
   3363 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3364 		}
   3365 		WM_CORE_UNLOCK(sc);
   3366 #ifdef WM_MPSAFE
   3367 		s = splnet();
   3368 #endif
   3369 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3370 #ifdef WM_MPSAFE
   3371 		splx(s);
   3372 #endif
   3373 		break;
   3374 	case SIOCINITIFADDR:
   3375 		WM_CORE_LOCK(sc);
   3376 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3377 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3378 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3379 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3380 			/* unicast address is first multicast entry */
   3381 			wm_set_filter(sc);
   3382 			error = 0;
   3383 			WM_CORE_UNLOCK(sc);
   3384 			break;
   3385 		}
   3386 		WM_CORE_UNLOCK(sc);
   3387 		/*FALLTHROUGH*/
   3388 	default:
   3389 #ifdef WM_MPSAFE
   3390 		s = splnet();
   3391 #endif
   3392 		/* It may call wm_start, so unlock here */
   3393 		error = ether_ioctl(ifp, cmd, data);
   3394 #ifdef WM_MPSAFE
   3395 		splx(s);
   3396 #endif
   3397 		if (error != ENETRESET)
   3398 			break;
   3399 
   3400 		error = 0;
   3401 
   3402 		if (cmd == SIOCSIFCAP)
   3403 			error = (*ifp->if_init)(ifp);
   3404 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
    3405 			; /* Nothing to do */
   3406 		else if (ifp->if_flags & IFF_RUNNING) {
   3407 			/*
   3408 			 * Multicast list has changed; set the hardware filter
   3409 			 * accordingly.
   3410 			 */
   3411 			WM_CORE_LOCK(sc);
   3412 			wm_set_filter(sc);
   3413 			WM_CORE_UNLOCK(sc);
   3414 		}
   3415 		break;
   3416 	}
   3417 
   3418 #ifndef WM_MPSAFE
   3419 	splx(s);
   3420 #endif
   3421 	return error;
   3422 }
   3423 
   3424 /* MAC address related */
   3425 
   3426 /*
    3427  * Get the offset of the MAC address and return it.
    3428  * If an error occurs, use offset 0.
   3429  */
   3430 static uint16_t
   3431 wm_check_alt_mac_addr(struct wm_softc *sc)
   3432 {
   3433 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3434 	uint16_t offset = NVM_OFF_MACADDR;
   3435 
   3436 	/* Try to read alternative MAC address pointer */
   3437 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3438 		return 0;
   3439 
    3440 	/* Check whether the pointer is valid. */
   3441 	if ((offset == 0x0000) || (offset == 0xffff))
   3442 		return 0;
   3443 
   3444 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3445 	/*
    3446 	 * Check whether the alternative MAC address is valid. Some cards
    3447 	 * have a non-0xffff pointer but don't actually use an alternative
    3448 	 * MAC address.
    3449 	 *
    3450 	 * A valid unicast address must not have the broadcast bit set.
   3451 	 */
   3452 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3453 		if (((myea[0] & 0xff) & 0x01) == 0)
   3454 			return offset; /* Found */
   3455 
   3456 	/* Not found */
   3457 	return 0;
   3458 }
   3459 
   3460 static int
   3461 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3462 {
   3463 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3464 	uint16_t offset = NVM_OFF_MACADDR;
   3465 	int do_invert = 0;
   3466 
   3467 	switch (sc->sc_type) {
   3468 	case WM_T_82580:
   3469 	case WM_T_I350:
   3470 	case WM_T_I354:
   3471 		/* EEPROM Top Level Partitioning */
   3472 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3473 		break;
   3474 	case WM_T_82571:
   3475 	case WM_T_82575:
   3476 	case WM_T_82576:
   3477 	case WM_T_80003:
   3478 	case WM_T_I210:
   3479 	case WM_T_I211:
   3480 		offset = wm_check_alt_mac_addr(sc);
   3481 		if (offset == 0)
   3482 			if ((sc->sc_funcid & 0x01) == 1)
   3483 				do_invert = 1;
   3484 		break;
   3485 	default:
   3486 		if ((sc->sc_funcid & 0x01) == 1)
   3487 			do_invert = 1;
   3488 		break;
   3489 	}
   3490 
   3491 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3492 		goto bad;
   3493 
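         	/* Each 16-bit NVM word holds two address octets, low byte first. */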
   3494 	enaddr[0] = myea[0] & 0xff;
   3495 	enaddr[1] = myea[0] >> 8;
   3496 	enaddr[2] = myea[1] & 0xff;
   3497 	enaddr[3] = myea[1] >> 8;
   3498 	enaddr[4] = myea[2] & 0xff;
   3499 	enaddr[5] = myea[2] >> 8;
   3500 
   3501 	/*
   3502 	 * Toggle the LSB of the MAC address on the second port
   3503 	 * of some dual port cards.
   3504 	 */
   3505 	if (do_invert != 0)
   3506 		enaddr[5] ^= 1;
   3507 
   3508 	return 0;
   3509 
   3510  bad:
   3511 	return -1;
   3512 }
   3513 
   3514 /*
   3515  * wm_set_ral:
   3516  *
    3517  *	Set an entry in the receive address list.
   3518  */
   3519 static void
   3520 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3521 {
   3522 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3523 	uint32_t wlock_mac;
   3524 	int rv;
   3525 
   3526 	if (enaddr != NULL) {
   3527 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3528 		    (enaddr[3] << 24);
   3529 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3530 		ral_hi |= RAL_AV;
   3531 	} else {
   3532 		ral_lo = 0;
   3533 		ral_hi = 0;
   3534 	}
   3535 
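         	/*
         	 * RAL holds the four low address octets; RAH holds the two
         	 * high octets plus the Address Valid bit. The register layout
         	 * differs per chip generation, hence the switch below.
         	 */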
   3536 	switch (sc->sc_type) {
   3537 	case WM_T_82542_2_0:
   3538 	case WM_T_82542_2_1:
   3539 	case WM_T_82543:
   3540 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3541 		CSR_WRITE_FLUSH(sc);
   3542 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3543 		CSR_WRITE_FLUSH(sc);
   3544 		break;
   3545 	case WM_T_PCH2:
   3546 	case WM_T_PCH_LPT:
   3547 	case WM_T_PCH_SPT:
   3548 	case WM_T_PCH_CNP:
   3549 		if (idx == 0) {
   3550 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3551 			CSR_WRITE_FLUSH(sc);
   3552 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3553 			CSR_WRITE_FLUSH(sc);
   3554 			return;
   3555 		}
   3556 		if (sc->sc_type != WM_T_PCH2) {
   3557 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3558 			    FWSM_WLOCK_MAC);
   3559 			addrl = WMREG_SHRAL(idx - 1);
   3560 			addrh = WMREG_SHRAH(idx - 1);
   3561 		} else {
   3562 			wlock_mac = 0;
   3563 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3564 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3565 		}
   3566 
   3567 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3568 			rv = wm_get_swflag_ich8lan(sc);
   3569 			if (rv != 0)
   3570 				return;
   3571 			CSR_WRITE(sc, addrl, ral_lo);
   3572 			CSR_WRITE_FLUSH(sc);
   3573 			CSR_WRITE(sc, addrh, ral_hi);
   3574 			CSR_WRITE_FLUSH(sc);
   3575 			wm_put_swflag_ich8lan(sc);
   3576 		}
   3577 
   3578 		break;
   3579 	default:
   3580 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3581 		CSR_WRITE_FLUSH(sc);
   3582 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3583 		CSR_WRITE_FLUSH(sc);
   3584 		break;
   3585 	}
   3586 }
   3587 
   3588 /*
   3589  * wm_mchash:
   3590  *
   3591  *	Compute the hash of the multicast address for the 4096-bit
   3592  *	multicast filter.
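          *
          *	sc_mchash_type selects which bit window of the address is
          *	used. ICH/PCH chips have a smaller 1024-bit filter and
          *	therefore use a 10-bit hash; the others use 12 bits.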
   3593  */
   3594 static uint32_t
   3595 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3596 {
   3597 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3598 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3599 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3600 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3601 	uint32_t hash;
   3602 
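         	/*
         	 * Example: with mchash_type 0 on non-ICH parts, the hash is
         	 * (enaddr[4] >> 4) | (enaddr[5] << 4), i.e. destination
         	 * address bits [47:36].
         	 */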
   3603 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3604 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3605 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3606 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3607 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3608 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3609 		return (hash & 0x3ff);
   3610 	}
   3611 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3612 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3613 
   3614 	return (hash & 0xfff);
   3615 }
   3616 
   3617 /*
    3618  * wm_rar_count:
    3619  *	Return the number of entries in the receive address list.
   3620  */
   3621 static int
   3622 wm_rar_count(struct wm_softc *sc)
   3623 {
   3624 	int size;
   3625 
   3626 	switch (sc->sc_type) {
   3627 	case WM_T_ICH8:
    3628 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3629 		break;
   3630 	case WM_T_ICH9:
   3631 	case WM_T_ICH10:
   3632 	case WM_T_PCH:
   3633 		size = WM_RAL_TABSIZE_ICH8;
   3634 		break;
   3635 	case WM_T_PCH2:
   3636 		size = WM_RAL_TABSIZE_PCH2;
   3637 		break;
   3638 	case WM_T_PCH_LPT:
   3639 	case WM_T_PCH_SPT:
   3640 	case WM_T_PCH_CNP:
   3641 		size = WM_RAL_TABSIZE_PCH_LPT;
   3642 		break;
   3643 	case WM_T_82575:
   3644 		size = WM_RAL_TABSIZE_82575;
   3645 		break;
   3646 	case WM_T_82576:
   3647 	case WM_T_82580:
   3648 		size = WM_RAL_TABSIZE_82576;
   3649 		break;
   3650 	case WM_T_I350:
   3651 	case WM_T_I354:
   3652 		size = WM_RAL_TABSIZE_I350;
   3653 		break;
   3654 	default:
   3655 		size = WM_RAL_TABSIZE;
   3656 	}
   3657 
   3658 	return size;
   3659 }
   3660 
   3661 /*
   3662  * wm_set_filter:
   3663  *
   3664  *	Set up the receive filter.
   3665  */
   3666 static void
   3667 wm_set_filter(struct wm_softc *sc)
   3668 {
   3669 	struct ethercom *ec = &sc->sc_ethercom;
   3670 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3671 	struct ether_multi *enm;
   3672 	struct ether_multistep step;
   3673 	bus_addr_t mta_reg;
   3674 	uint32_t hash, reg, bit;
   3675 	int i, size, ralmax;
   3676 
   3677 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3678 		device_xname(sc->sc_dev), __func__));
   3679 
   3680 	if (sc->sc_type >= WM_T_82544)
   3681 		mta_reg = WMREG_CORDOVA_MTA;
   3682 	else
   3683 		mta_reg = WMREG_MTA;
   3684 
   3685 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3686 
   3687 	if (ifp->if_flags & IFF_BROADCAST)
   3688 		sc->sc_rctl |= RCTL_BAM;
   3689 	if (ifp->if_flags & IFF_PROMISC) {
   3690 		sc->sc_rctl |= RCTL_UPE;
   3691 		goto allmulti;
   3692 	}
   3693 
   3694 	/*
   3695 	 * Set the station address in the first RAL slot, and
   3696 	 * clear the remaining slots.
   3697 	 */
   3698 	size = wm_rar_count(sc);
   3699 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3700 
   3701 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3702 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3703 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3704 		switch (i) {
   3705 		case 0:
   3706 			/* We can use all entries */
   3707 			ralmax = size;
   3708 			break;
   3709 		case 1:
   3710 			/* Only RAR[0] */
   3711 			ralmax = 1;
   3712 			break;
   3713 		default:
   3714 			/* available SHRA + RAR[0] */
   3715 			ralmax = i + 1;
   3716 		}
   3717 	} else
   3718 		ralmax = size;
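         	/*
         	 * Clear only the entries we are allowed to write; entries
         	 * above ralmax are protected by the WLOCK_MAC firmware lock.
         	 */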
   3719 	for (i = 1; i < size; i++) {
   3720 		if (i < ralmax)
   3721 			wm_set_ral(sc, NULL, i);
   3722 	}
   3723 
   3724 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3725 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3726 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3727 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3728 		size = WM_ICH8_MC_TABSIZE;
   3729 	else
   3730 		size = WM_MC_TABSIZE;
   3731 	/* Clear out the multicast table. */
   3732 	for (i = 0; i < size; i++) {
   3733 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3734 		CSR_WRITE_FLUSH(sc);
   3735 	}
   3736 
   3737 	ETHER_LOCK(ec);
   3738 	ETHER_FIRST_MULTI(step, ec, enm);
   3739 	while (enm != NULL) {
   3740 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3741 			ETHER_UNLOCK(ec);
   3742 			/*
   3743 			 * We must listen to a range of multicast addresses.
   3744 			 * For now, just accept all multicasts, rather than
   3745 			 * trying to set only those filter bits needed to match
   3746 			 * the range.  (At this time, the only use of address
   3747 			 * ranges is for IP multicast routing, for which the
   3748 			 * range is big enough to require all bits set.)
   3749 			 */
   3750 			goto allmulti;
   3751 		}
   3752 
   3753 		hash = wm_mchash(sc, enm->enm_addrlo);
   3754 
   3755 		reg = (hash >> 5);
   3756 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3757 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3758 		    || (sc->sc_type == WM_T_PCH2)
   3759 		    || (sc->sc_type == WM_T_PCH_LPT)
   3760 		    || (sc->sc_type == WM_T_PCH_SPT)
   3761 		    || (sc->sc_type == WM_T_PCH_CNP))
   3762 			reg &= 0x1f;
   3763 		else
   3764 			reg &= 0x7f;
   3765 		bit = hash & 0x1f;
   3766 
   3767 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3768 		hash |= 1U << bit;
   3769 
   3770 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3771 			/*
    3772 			 * 82544 Errata 9: Certain registers cannot be written
   3773 			 * with particular alignments in PCI-X bus operation
   3774 			 * (FCAH, MTA and VFTA).
   3775 			 */
   3776 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3777 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3778 			CSR_WRITE_FLUSH(sc);
   3779 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3780 			CSR_WRITE_FLUSH(sc);
   3781 		} else {
   3782 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3783 			CSR_WRITE_FLUSH(sc);
   3784 		}
   3785 
   3786 		ETHER_NEXT_MULTI(step, enm);
   3787 	}
   3788 	ETHER_UNLOCK(ec);
   3789 
   3790 	ifp->if_flags &= ~IFF_ALLMULTI;
   3791 	goto setit;
   3792 
   3793  allmulti:
   3794 	ifp->if_flags |= IFF_ALLMULTI;
   3795 	sc->sc_rctl |= RCTL_MPE;
   3796 
   3797  setit:
   3798 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3799 }
   3800 
   3801 /* Reset and init related */
   3802 
   3803 static void
   3804 wm_set_vlan(struct wm_softc *sc)
   3805 {
   3806 
   3807 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3808 		device_xname(sc->sc_dev), __func__));
   3809 
   3810 	/* Deal with VLAN enables. */
   3811 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3812 		sc->sc_ctrl |= CTRL_VME;
   3813 	else
   3814 		sc->sc_ctrl &= ~CTRL_VME;
   3815 
   3816 	/* Write the control registers. */
   3817 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3818 }
   3819 
   3820 static void
   3821 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3822 {
   3823 	uint32_t gcr;
   3824 	pcireg_t ctrl2;
   3825 
   3826 	gcr = CSR_READ(sc, WMREG_GCR);
   3827 
   3828 	/* Only take action if timeout value is defaulted to 0 */
   3829 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3830 		goto out;
   3831 
   3832 	if ((gcr & GCR_CAP_VER2) == 0) {
   3833 		gcr |= GCR_CMPL_TMOUT_10MS;
   3834 		goto out;
   3835 	}
   3836 
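         	/*
         	 * Devices with a version-2 capability take the timeout from
         	 * the PCIe Device Control 2 register instead; request 16ms.
         	 */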
   3837 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3838 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3839 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3840 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3841 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3842 
   3843 out:
   3844 	/* Disable completion timeout resend */
   3845 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3846 
   3847 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3848 }
   3849 
   3850 void
   3851 wm_get_auto_rd_done(struct wm_softc *sc)
   3852 {
   3853 	int i;
   3854 
    3855 	/* Wait for eeprom to reload */
   3856 	switch (sc->sc_type) {
   3857 	case WM_T_82571:
   3858 	case WM_T_82572:
   3859 	case WM_T_82573:
   3860 	case WM_T_82574:
   3861 	case WM_T_82583:
   3862 	case WM_T_82575:
   3863 	case WM_T_82576:
   3864 	case WM_T_82580:
   3865 	case WM_T_I350:
   3866 	case WM_T_I354:
   3867 	case WM_T_I210:
   3868 	case WM_T_I211:
   3869 	case WM_T_80003:
   3870 	case WM_T_ICH8:
   3871 	case WM_T_ICH9:
   3872 		for (i = 0; i < 10; i++) {
   3873 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3874 				break;
   3875 			delay(1000);
   3876 		}
   3877 		if (i == 10) {
   3878 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3879 			    "complete\n", device_xname(sc->sc_dev));
   3880 		}
   3881 		break;
   3882 	default:
   3883 		break;
   3884 	}
   3885 }
   3886 
   3887 void
   3888 wm_lan_init_done(struct wm_softc *sc)
   3889 {
   3890 	uint32_t reg = 0;
   3891 	int i;
   3892 
   3893 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3894 		device_xname(sc->sc_dev), __func__));
   3895 
   3896 	/* Wait for eeprom to reload */
   3897 	switch (sc->sc_type) {
   3898 	case WM_T_ICH10:
   3899 	case WM_T_PCH:
   3900 	case WM_T_PCH2:
   3901 	case WM_T_PCH_LPT:
   3902 	case WM_T_PCH_SPT:
   3903 	case WM_T_PCH_CNP:
   3904 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3905 			reg = CSR_READ(sc, WMREG_STATUS);
   3906 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3907 				break;
   3908 			delay(100);
   3909 		}
   3910 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3911 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3912 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3913 		}
   3914 		break;
   3915 	default:
   3916 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3917 		    __func__);
   3918 		break;
   3919 	}
   3920 
   3921 	reg &= ~STATUS_LAN_INIT_DONE;
   3922 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3923 }
   3924 
   3925 void
   3926 wm_get_cfg_done(struct wm_softc *sc)
   3927 {
   3928 	int mask;
   3929 	uint32_t reg;
   3930 	int i;
   3931 
   3932 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3933 		device_xname(sc->sc_dev), __func__));
   3934 
   3935 	/* Wait for eeprom to reload */
   3936 	switch (sc->sc_type) {
   3937 	case WM_T_82542_2_0:
   3938 	case WM_T_82542_2_1:
   3939 		/* null */
   3940 		break;
   3941 	case WM_T_82543:
   3942 	case WM_T_82544:
   3943 	case WM_T_82540:
   3944 	case WM_T_82545:
   3945 	case WM_T_82545_3:
   3946 	case WM_T_82546:
   3947 	case WM_T_82546_3:
   3948 	case WM_T_82541:
   3949 	case WM_T_82541_2:
   3950 	case WM_T_82547:
   3951 	case WM_T_82547_2:
   3952 	case WM_T_82573:
   3953 	case WM_T_82574:
   3954 	case WM_T_82583:
   3955 		/* generic */
   3956 		delay(10*1000);
   3957 		break;
   3958 	case WM_T_80003:
   3959 	case WM_T_82571:
   3960 	case WM_T_82572:
   3961 	case WM_T_82575:
   3962 	case WM_T_82576:
   3963 	case WM_T_82580:
   3964 	case WM_T_I350:
   3965 	case WM_T_I354:
   3966 	case WM_T_I210:
   3967 	case WM_T_I211:
   3968 		if (sc->sc_type == WM_T_82571) {
   3969 			/* Only 82571 shares port 0 */
   3970 			mask = EEMNGCTL_CFGDONE_0;
   3971 		} else
   3972 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3973 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3974 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3975 				break;
   3976 			delay(1000);
   3977 		}
   3978 		if (i >= WM_PHY_CFG_TIMEOUT)
   3979 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3980 				device_xname(sc->sc_dev), __func__));
   3981 		break;
   3982 	case WM_T_ICH8:
   3983 	case WM_T_ICH9:
   3984 	case WM_T_ICH10:
   3985 	case WM_T_PCH:
   3986 	case WM_T_PCH2:
   3987 	case WM_T_PCH_LPT:
   3988 	case WM_T_PCH_SPT:
   3989 	case WM_T_PCH_CNP:
   3990 		delay(10*1000);
   3991 		if (sc->sc_type >= WM_T_ICH10)
   3992 			wm_lan_init_done(sc);
   3993 		else
   3994 			wm_get_auto_rd_done(sc);
   3995 
   3996 		/* Clear PHY Reset Asserted bit */
   3997 		reg = CSR_READ(sc, WMREG_STATUS);
   3998 		if ((reg & STATUS_PHYRA) != 0)
   3999 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4000 		break;
   4001 	default:
   4002 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4003 		    __func__);
   4004 		break;
   4005 	}
   4006 }
   4007 
   4008 int
   4009 wm_phy_post_reset(struct wm_softc *sc)
   4010 {
   4011 	device_t dev = sc->sc_dev;
   4012 	uint16_t reg;
   4013 	int rv = 0;
   4014 
   4015 	/* This function is only for ICH8 and newer. */
   4016 	if (sc->sc_type < WM_T_ICH8)
   4017 		return 0;
   4018 
   4019 	if (wm_phy_resetisblocked(sc)) {
   4020 		/* XXX */
   4021 		device_printf(dev, "PHY is blocked\n");
   4022 		return -1;
   4023 	}
   4024 
   4025 	/* Allow time for h/w to get to quiescent state after reset */
   4026 	delay(10*1000);
   4027 
   4028 	/* Perform any necessary post-reset workarounds */
   4029 	if (sc->sc_type == WM_T_PCH)
   4030 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4031 	else if (sc->sc_type == WM_T_PCH2)
   4032 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4033 	if (rv != 0)
   4034 		return rv;
   4035 
   4036 	/* Clear the host wakeup bit after lcd reset */
   4037 	if (sc->sc_type >= WM_T_PCH) {
   4038 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4039 		reg &= ~BM_WUC_HOST_WU_BIT;
   4040 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4041 	}
   4042 
   4043 	/* Configure the LCD with the extended configuration region in NVM */
   4044 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4045 		return rv;
   4046 
   4047 	/* Configure the LCD with the OEM bits in NVM */
   4048 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4049 
   4050 	if (sc->sc_type == WM_T_PCH2) {
   4051 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4052 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4053 			delay(10 * 1000);
   4054 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4055 		}
   4056 		/* Set EEE LPI Update Timer to 200usec */
   4057 		rv = sc->phy.acquire(sc);
   4058 		if (rv)
   4059 			return rv;
   4060 		rv = wm_write_emi_reg_locked(dev,
   4061 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4062 		sc->phy.release(sc);
   4063 	}
   4064 
   4065 	return rv;
   4066 }
   4067 
   4068 /* Only for PCH and newer */
   4069 static int
   4070 wm_write_smbus_addr(struct wm_softc *sc)
   4071 {
   4072 	uint32_t strap, freq;
   4073 	uint16_t phy_data;
   4074 	int rv;
   4075 
   4076 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4077 		device_xname(sc->sc_dev), __func__));
   4078 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4079 
   4080 	strap = CSR_READ(sc, WMREG_STRAP);
   4081 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4082 
   4083 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4084 	if (rv != 0)
   4085 		return -1;
   4086 
   4087 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4088 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4089 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4090 
   4091 	if (sc->sc_phytype == WMPHY_I217) {
   4092 		/* Restore SMBus frequency */
    4093 		if (freq--) {
   4094 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4095 			    | HV_SMB_ADDR_FREQ_HIGH);
   4096 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4097 			    HV_SMB_ADDR_FREQ_LOW);
   4098 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4099 			    HV_SMB_ADDR_FREQ_HIGH);
   4100 		} else
   4101 			DPRINTF(WM_DEBUG_INIT,
   4102 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4103 				device_xname(sc->sc_dev), __func__));
   4104 	}
   4105 
   4106 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4107 	    phy_data);
   4108 }
   4109 
   4110 static int
   4111 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4112 {
   4113 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4114 	uint16_t phy_page = 0;
   4115 	int rv = 0;
   4116 
   4117 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4118 		device_xname(sc->sc_dev), __func__));
   4119 
   4120 	switch (sc->sc_type) {
   4121 	case WM_T_ICH8:
   4122 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4123 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4124 			return 0;
   4125 
   4126 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4127 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4128 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4129 			break;
   4130 		}
   4131 		/* FALLTHROUGH */
   4132 	case WM_T_PCH:
   4133 	case WM_T_PCH2:
   4134 	case WM_T_PCH_LPT:
   4135 	case WM_T_PCH_SPT:
   4136 	case WM_T_PCH_CNP:
   4137 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4138 		break;
   4139 	default:
   4140 		return 0;
   4141 	}
   4142 
   4143 	if ((rv = sc->phy.acquire(sc)) != 0)
   4144 		return rv;
   4145 
   4146 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4147 	if ((reg & sw_cfg_mask) == 0)
   4148 		goto release;
   4149 
   4150 	/*
   4151 	 * Make sure HW does not configure LCD from PHY extended configuration
   4152 	 * before SW configuration
   4153 	 */
   4154 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4155 	if ((sc->sc_type < WM_T_PCH2)
   4156 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4157 		goto release;
   4158 
   4159 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4160 		device_xname(sc->sc_dev), __func__));
   4161 	/* word_addr is in DWORD */
   4162 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4163 
   4164 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4165 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4166 	if (cnf_size == 0)
   4167 		goto release;
   4168 
   4169 	if (((sc->sc_type == WM_T_PCH)
   4170 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4171 	    || (sc->sc_type > WM_T_PCH)) {
   4172 		/*
   4173 		 * HW configures the SMBus address and LEDs when the OEM and
   4174 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4175 		 * are cleared, SW will configure them instead.
   4176 		 */
   4177 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4178 			device_xname(sc->sc_dev), __func__));
   4179 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4180 			goto release;
   4181 
   4182 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4183 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4184 		    (uint16_t)reg);
   4185 		if (rv != 0)
   4186 			goto release;
   4187 	}
   4188 
   4189 	/* Configure LCD from extended configuration region. */
   4190 	for (i = 0; i < cnf_size; i++) {
   4191 		uint16_t reg_data, reg_addr;
   4192 
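         		/*
         		 * Each entry is a (data, address) word pair. A write to
         		 * the page-select register is latched in phy_page and
         		 * folded into subsequent register addresses.
         		 */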
   4193 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4194 			goto release;
   4195 
    4196 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4197 			goto release;
   4198 
   4199 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4200 			phy_page = reg_data;
   4201 
   4202 		reg_addr &= IGPHY_MAXREGADDR;
   4203 		reg_addr |= phy_page;
   4204 
   4205 		KASSERT(sc->phy.writereg_locked != NULL);
   4206 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4207 		    reg_data);
   4208 	}
   4209 
   4210 release:
   4211 	sc->phy.release(sc);
   4212 	return rv;
   4213 }
   4214 
   4215 /*
   4216  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4217  *  @sc:       pointer to the HW structure
    4218  *  @d0_state: true if entering D0, false if entering D3
   4219  *
   4220  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4221  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4222  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4223  */
   4224 int
   4225 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4226 {
   4227 	uint32_t mac_reg;
   4228 	uint16_t oem_reg;
   4229 	int rv;
   4230 
   4231 	if (sc->sc_type < WM_T_PCH)
   4232 		return 0;
   4233 
   4234 	rv = sc->phy.acquire(sc);
   4235 	if (rv != 0)
   4236 		return rv;
   4237 
   4238 	if (sc->sc_type == WM_T_PCH) {
   4239 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4240 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4241 			goto release;
   4242 	}
   4243 
   4244 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4245 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4246 		goto release;
   4247 
   4248 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4249 
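         	/*
         	 * Translate the Gbe-disable and LPLU bits from PHY_CTRL into
         	 * the PHY's OEM bits for the requested power state.
         	 */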
   4250 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4251 	if (rv != 0)
   4252 		goto release;
   4253 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4254 
   4255 	if (d0_state) {
   4256 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4257 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4258 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4259 			oem_reg |= HV_OEM_BITS_LPLU;
   4260 	} else {
   4261 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4262 		    != 0)
   4263 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4264 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4265 		    != 0)
   4266 			oem_reg |= HV_OEM_BITS_LPLU;
   4267 	}
   4268 
   4269 	/* Set Restart auto-neg to activate the bits */
   4270 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4271 	    && (wm_phy_resetisblocked(sc) == false))
   4272 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4273 
   4274 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4275 
   4276 release:
   4277 	sc->phy.release(sc);
   4278 
   4279 	return rv;
   4280 }
   4281 
   4282 /* Init hardware bits */
   4283 void
   4284 wm_initialize_hardware_bits(struct wm_softc *sc)
   4285 {
   4286 	uint32_t tarc0, tarc1, reg;
   4287 
   4288 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4289 		device_xname(sc->sc_dev), __func__));
   4290 
   4291 	/* For 82571 variant, 80003 and ICHs */
   4292 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4293 	    || (sc->sc_type >= WM_T_80003)) {
   4294 
   4295 		/* Transmit Descriptor Control 0 */
   4296 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4297 		reg |= TXDCTL_COUNT_DESC;
   4298 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4299 
   4300 		/* Transmit Descriptor Control 1 */
   4301 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4302 		reg |= TXDCTL_COUNT_DESC;
   4303 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4304 
   4305 		/* TARC0 */
   4306 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4307 		switch (sc->sc_type) {
   4308 		case WM_T_82571:
   4309 		case WM_T_82572:
   4310 		case WM_T_82573:
   4311 		case WM_T_82574:
   4312 		case WM_T_82583:
   4313 		case WM_T_80003:
   4314 			/* Clear bits 30..27 */
   4315 			tarc0 &= ~__BITS(30, 27);
   4316 			break;
   4317 		default:
   4318 			break;
   4319 		}
   4320 
   4321 		switch (sc->sc_type) {
   4322 		case WM_T_82571:
   4323 		case WM_T_82572:
   4324 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4325 
   4326 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4327 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4328 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4329 			/* 8257[12] Errata No.7 */
    4330 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4331 
   4332 			/* TARC1 bit 28 */
   4333 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4334 				tarc1 &= ~__BIT(28);
   4335 			else
   4336 				tarc1 |= __BIT(28);
   4337 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4338 
   4339 			/*
   4340 			 * 8257[12] Errata No.13
    4341 			 * Disable Dynamic Clock Gating.
   4342 			 */
   4343 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4344 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4345 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4346 			break;
   4347 		case WM_T_82573:
   4348 		case WM_T_82574:
   4349 		case WM_T_82583:
   4350 			if ((sc->sc_type == WM_T_82574)
   4351 			    || (sc->sc_type == WM_T_82583))
   4352 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4353 
   4354 			/* Extended Device Control */
   4355 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4356 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4357 			reg |= __BIT(22);	/* Set bit 22 */
   4358 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4359 
   4360 			/* Device Control */
   4361 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4362 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4363 
   4364 			/* PCIe Control Register */
   4365 			/*
   4366 			 * 82573 Errata (unknown).
   4367 			 *
   4368 			 * 82574 Errata 25 and 82583 Errata 12
   4369 			 * "Dropped Rx Packets":
    4370 			 *   NVM image version 2.1.4 and newer do not have this bug.
   4371 			 */
   4372 			reg = CSR_READ(sc, WMREG_GCR);
   4373 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4374 			CSR_WRITE(sc, WMREG_GCR, reg);
   4375 
   4376 			if ((sc->sc_type == WM_T_82574)
   4377 			    || (sc->sc_type == WM_T_82583)) {
   4378 				/*
   4379 				 * Document says this bit must be set for
   4380 				 * proper operation.
   4381 				 */
   4382 				reg = CSR_READ(sc, WMREG_GCR);
   4383 				reg |= __BIT(22);
   4384 				CSR_WRITE(sc, WMREG_GCR, reg);
   4385 
   4386 				/*
    4387 				 * Apply a workaround for the hardware errata
    4388 				 * documented in the errata docs. This fixes an
    4389 				 * issue where some error-prone or unreliable
    4390 				 * PCIe completions occur, particularly with
    4391 				 * ASPM enabled. Without the fix, the issue can
    4392 				 * cause Tx timeouts.
   4393 				 */
   4394 				reg = CSR_READ(sc, WMREG_GCR2);
   4395 				reg |= __BIT(0);
   4396 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4397 			}
   4398 			break;
   4399 		case WM_T_80003:
   4400 			/* TARC0 */
   4401 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4402 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4403 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4404 
   4405 			/* TARC1 bit 28 */
   4406 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4407 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4408 				tarc1 &= ~__BIT(28);
   4409 			else
   4410 				tarc1 |= __BIT(28);
   4411 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4412 			break;
   4413 		case WM_T_ICH8:
   4414 		case WM_T_ICH9:
   4415 		case WM_T_ICH10:
   4416 		case WM_T_PCH:
   4417 		case WM_T_PCH2:
   4418 		case WM_T_PCH_LPT:
   4419 		case WM_T_PCH_SPT:
   4420 		case WM_T_PCH_CNP:
   4421 			/* TARC0 */
   4422 			if (sc->sc_type == WM_T_ICH8) {
   4423 				/* Set TARC0 bits 29 and 28 */
   4424 				tarc0 |= __BITS(29, 28);
   4425 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4426 				tarc0 |= __BIT(29);
   4427 				/*
   4428 				 *  Drop bit 28. From Linux.
   4429 				 * See I218/I219 spec update
   4430 				 * "5. Buffer Overrun While the I219 is
   4431 				 * Processing DMA Transactions"
   4432 				 */
   4433 				tarc0 &= ~__BIT(28);
   4434 			}
   4435 			/* Set TARC0 bits 23,24,26,27 */
   4436 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4437 
   4438 			/* CTRL_EXT */
   4439 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4440 			reg |= __BIT(22);	/* Set bit 22 */
   4441 			/*
   4442 			 * Enable PHY low-power state when MAC is at D3
   4443 			 * w/o WoL
   4444 			 */
   4445 			if (sc->sc_type >= WM_T_PCH)
   4446 				reg |= CTRL_EXT_PHYPDEN;
   4447 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4448 
   4449 			/* TARC1 */
   4450 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4451 			/* bit 28 */
   4452 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4453 				tarc1 &= ~__BIT(28);
   4454 			else
   4455 				tarc1 |= __BIT(28);
   4456 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4457 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4458 
   4459 			/* Device Status */
   4460 			if (sc->sc_type == WM_T_ICH8) {
   4461 				reg = CSR_READ(sc, WMREG_STATUS);
   4462 				reg &= ~__BIT(31);
   4463 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4464 
   4465 			}
   4466 
   4467 			/* IOSFPC */
   4468 			if (sc->sc_type == WM_T_PCH_SPT) {
   4469 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4470 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4471 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4472 			}
   4473 			/*
   4474 			 * Work-around descriptor data corruption issue during
   4475 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4476 			 * capability.
   4477 			 */
   4478 			reg = CSR_READ(sc, WMREG_RFCTL);
   4479 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4480 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4481 			break;
   4482 		default:
   4483 			break;
   4484 		}
   4485 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4486 
   4487 		switch (sc->sc_type) {
   4488 		/*
   4489 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4490 		 * Avoid RSS Hash Value bug.
   4491 		 */
   4492 		case WM_T_82571:
   4493 		case WM_T_82572:
   4494 		case WM_T_82573:
   4495 		case WM_T_80003:
   4496 		case WM_T_ICH8:
   4497 			reg = CSR_READ(sc, WMREG_RFCTL);
   4498 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4499 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4500 			break;
   4501 		case WM_T_82574:
    4502 			/* Use extended Rx descriptors. */
   4503 			reg = CSR_READ(sc, WMREG_RFCTL);
   4504 			reg |= WMREG_RFCTL_EXSTEN;
   4505 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4506 			break;
   4507 		default:
   4508 			break;
   4509 		}
   4510 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4511 		/*
   4512 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4513 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4514 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4515 		 * Correctly by the Device"
   4516 		 *
   4517 		 * I354(C2000) Errata AVR53:
   4518 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4519 		 * Hang"
   4520 		 */
   4521 		reg = CSR_READ(sc, WMREG_RFCTL);
   4522 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4523 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4524 	}
   4525 }
   4526 
   4527 static uint32_t
   4528 wm_rxpbs_adjust_82580(uint32_t val)
   4529 {
   4530 	uint32_t rv = 0;
   4531 
   4532 	if (val < __arraycount(wm_82580_rxpbs_table))
   4533 		rv = wm_82580_rxpbs_table[val];
   4534 
   4535 	return rv;
   4536 }
   4537 
   4538 /*
   4539  * wm_reset_phy:
   4540  *
    4541  *	Generic PHY reset function.
   4542  *	Same as e1000_phy_hw_reset_generic()
   4543  */
   4544 static int
   4545 wm_reset_phy(struct wm_softc *sc)
   4546 {
   4547 	uint32_t reg;
   4548 
   4549 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4550 		device_xname(sc->sc_dev), __func__));
   4551 	if (wm_phy_resetisblocked(sc))
   4552 		return -1;
   4553 
   4554 	sc->phy.acquire(sc);
   4555 
   4556 	reg = CSR_READ(sc, WMREG_CTRL);
   4557 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4558 	CSR_WRITE_FLUSH(sc);
   4559 
   4560 	delay(sc->phy.reset_delay_us);
   4561 
   4562 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4563 	CSR_WRITE_FLUSH(sc);
   4564 
   4565 	delay(150);
   4566 
   4567 	sc->phy.release(sc);
   4568 
   4569 	wm_get_cfg_done(sc);
   4570 	wm_phy_post_reset(sc);
   4571 
   4572 	return 0;
   4573 }
   4574 
   4575 /*
    4576  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4577  * so it is enough to check sc->sc_queue[0] only.
   4578  */
   4579 static void
   4580 wm_flush_desc_rings(struct wm_softc *sc)
   4581 {
   4582 	pcireg_t preg;
   4583 	uint32_t reg;
   4584 	struct wm_txqueue *txq;
   4585 	wiseman_txdesc_t *txd;
   4586 	int nexttx;
   4587 	uint32_t rctl;
   4588 
   4589 	/* First, disable MULR fix in FEXTNVM11 */
   4590 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4591 	reg |= FEXTNVM11_DIS_MULRFIX;
   4592 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4593 
   4594 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4595 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4596 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4597 		return;
   4598 
   4599 	/* TX */
   4600 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4601 	    device_xname(sc->sc_dev), preg, reg);
   4602 	reg = CSR_READ(sc, WMREG_TCTL);
   4603 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4604 
   4605 	txq = &sc->sc_queue[0].wmq_txq;
   4606 	nexttx = txq->txq_next;
   4607 	txd = &txq->txq_descs[nexttx];
   4608 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4609 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4610 	txd->wtx_fields.wtxu_status = 0;
   4611 	txd->wtx_fields.wtxu_options = 0;
   4612 	txd->wtx_fields.wtxu_vlan = 0;
   4613 
   4614 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4615 	    BUS_SPACE_BARRIER_WRITE);
   4616 
   4617 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4618 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4619 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4620 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4621 	delay(250);
   4622 
   4623 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4624 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4625 		return;
   4626 
   4627 	/* RX */
   4628 	printf("%s: Need RX flush (reg = %08x)\n",
   4629 	    device_xname(sc->sc_dev), preg);
   4630 	rctl = CSR_READ(sc, WMREG_RCTL);
   4631 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4632 	CSR_WRITE_FLUSH(sc);
   4633 	delay(150);
   4634 
   4635 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4636 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4637 	reg &= 0xffffc000;
   4638 	/*
   4639 	 * update thresholds: prefetch threshold to 31, host threshold
   4640 	 * to 1 and make sure the granularity is "descriptors" and not
   4641 	 * "cache lines"
   4642 	 */
   4643 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4644 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
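
         	/*
         	 * A decoded view of the value written above (a sketch; the
         	 * exact field widths are an assumption, inferred from the
         	 * 0xffffc000 mask and the threshold comment): PTHRESH lives
         	 * in the low bits, so 0x1f sets the prefetch threshold to 31;
         	 * HTHRESH starts at bit 8, so (1 << 8) sets the host
         	 * threshold to 1; and RXDCTL_GRAN makes both thresholds
         	 * count descriptors rather than cache lines.
         	 */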
   4645 
   4646 	/*
   4647 	 * momentarily enable the RX ring for the changes to take
   4648 	 * effect
   4649 	 */
   4650 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4651 	CSR_WRITE_FLUSH(sc);
   4652 	delay(150);
   4653 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4654 }
   4655 
   4656 /*
   4657  * wm_reset:
   4658  *
   4659  *	Reset the i82542 chip.
   4660  */
   4661 static void
   4662 wm_reset(struct wm_softc *sc)
   4663 {
   4664 	int phy_reset = 0;
   4665 	int i, error = 0;
   4666 	uint32_t reg;
   4667 	uint16_t kmreg;
   4668 	int rv;
   4669 
   4670 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4671 		device_xname(sc->sc_dev), __func__));
   4672 	KASSERT(sc->sc_type != 0);
   4673 
   4674 	/*
   4675 	 * Allocate on-chip memory according to the MTU size.
   4676 	 * The Packet Buffer Allocation register must be written
   4677 	 * before the chip is reset.
   4678 	 */
   4679 	switch (sc->sc_type) {
   4680 	case WM_T_82547:
   4681 	case WM_T_82547_2:
   4682 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4683 		    PBA_22K : PBA_30K;
   4684 		for (i = 0; i < sc->sc_nqueues; i++) {
   4685 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4686 			txq->txq_fifo_head = 0;
   4687 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4688 			txq->txq_fifo_size =
   4689 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4690 			txq->txq_fifo_stall = 0;
   4691 		}
   4692 		break;
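         	/*
         	 * An interpretive sketch of the 82547 case above (the 40 KB
         	 * total is an assumption implied by PBA_40K): the PBA value
         	 * splits the packet buffer between Rx and Tx, so with
         	 * sc_pba = PBA_30K the Rx side keeps 30 KB and the Tx FIFO
         	 * fields describe the remaining (40 - 30) KB, starting at
         	 * byte address sc_pba << PBA_ADDR_SHIFT.
         	 */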
   4693 	case WM_T_82571:
   4694 	case WM_T_82572:
    4695 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4696 	case WM_T_80003:
   4697 		sc->sc_pba = PBA_32K;
   4698 		break;
   4699 	case WM_T_82573:
   4700 		sc->sc_pba = PBA_12K;
   4701 		break;
   4702 	case WM_T_82574:
   4703 	case WM_T_82583:
   4704 		sc->sc_pba = PBA_20K;
   4705 		break;
   4706 	case WM_T_82576:
   4707 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4708 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4709 		break;
   4710 	case WM_T_82580:
   4711 	case WM_T_I350:
   4712 	case WM_T_I354:
   4713 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4714 		break;
   4715 	case WM_T_I210:
   4716 	case WM_T_I211:
   4717 		sc->sc_pba = PBA_34K;
   4718 		break;
   4719 	case WM_T_ICH8:
   4720 		/* Workaround for a bit corruption issue in FIFO memory */
   4721 		sc->sc_pba = PBA_8K;
   4722 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4723 		break;
   4724 	case WM_T_ICH9:
   4725 	case WM_T_ICH10:
   4726 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4727 		    PBA_14K : PBA_10K;
   4728 		break;
   4729 	case WM_T_PCH:
   4730 	case WM_T_PCH2:	/* XXX 14K? */
   4731 	case WM_T_PCH_LPT:
   4732 	case WM_T_PCH_SPT:
   4733 	case WM_T_PCH_CNP:
   4734 		sc->sc_pba = PBA_26K;
   4735 		break;
   4736 	default:
   4737 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4738 		    PBA_40K : PBA_48K;
   4739 		break;
   4740 	}
   4741 	/*
    4742 	 * Only old or non-multiqueue devices have the PBA register.
   4743 	 * XXX Need special handling for 82575.
   4744 	 */
   4745 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4746 	    || (sc->sc_type == WM_T_82575))
   4747 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4748 
   4749 	/* Prevent the PCI-E bus from sticking */
   4750 	if (sc->sc_flags & WM_F_PCIE) {
   4751 		int timeout = 800;
   4752 
   4753 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4754 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4755 
   4756 		while (timeout--) {
   4757 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4758 			    == 0)
   4759 				break;
   4760 			delay(100);
   4761 		}
   4762 		if (timeout == 0)
   4763 			device_printf(sc->sc_dev,
   4764 			    "failed to disable busmastering\n");
   4765 	}
   4766 
   4767 	/* Set the completion timeout for interface */
   4768 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4769 	    || (sc->sc_type == WM_T_82580)
   4770 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4771 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4772 		wm_set_pcie_completion_timeout(sc);
   4773 
   4774 	/* Clear interrupt */
   4775 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4776 	if (wm_is_using_msix(sc)) {
   4777 		if (sc->sc_type != WM_T_82574) {
   4778 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4779 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4780 		} else
   4781 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4782 	}
   4783 
   4784 	/* Stop the transmit and receive processes. */
   4785 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4786 	sc->sc_rctl &= ~RCTL_EN;
   4787 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4788 	CSR_WRITE_FLUSH(sc);
   4789 
   4790 	/* XXX set_tbi_sbp_82543() */
   4791 
   4792 	delay(10*1000);
   4793 
   4794 	/* Must acquire the MDIO ownership before MAC reset */
   4795 	switch (sc->sc_type) {
   4796 	case WM_T_82573:
   4797 	case WM_T_82574:
   4798 	case WM_T_82583:
   4799 		error = wm_get_hw_semaphore_82573(sc);
   4800 		break;
   4801 	default:
   4802 		break;
   4803 	}
   4804 
   4805 	/*
   4806 	 * 82541 Errata 29? & 82547 Errata 28?
   4807 	 * See also the description about PHY_RST bit in CTRL register
   4808 	 * in 8254x_GBe_SDM.pdf.
   4809 	 */
   4810 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4811 		CSR_WRITE(sc, WMREG_CTRL,
   4812 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4813 		CSR_WRITE_FLUSH(sc);
   4814 		delay(5000);
   4815 	}
   4816 
   4817 	switch (sc->sc_type) {
   4818 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4819 	case WM_T_82541:
   4820 	case WM_T_82541_2:
   4821 	case WM_T_82547:
   4822 	case WM_T_82547_2:
   4823 		/*
   4824 		 * On some chipsets, a reset through a memory-mapped write
   4825 		 * cycle can cause the chip to reset before completing the
    4826 		 * write cycle. This causes major headaches that can be avoided
   4827 		 * by issuing the reset via indirect register writes through
   4828 		 * I/O space.
   4829 		 *
   4830 		 * So, if we successfully mapped the I/O BAR at attach time,
   4831 		 * use that. Otherwise, try our luck with a memory-mapped
   4832 		 * reset.
   4833 		 */
   4834 		if (sc->sc_flags & WM_F_IOH_VALID)
   4835 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4836 		else
   4837 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4838 		break;
   4839 	case WM_T_82545_3:
   4840 	case WM_T_82546_3:
   4841 		/* Use the shadow control register on these chips. */
   4842 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4843 		break;
   4844 	case WM_T_80003:
   4845 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4846 		sc->phy.acquire(sc);
   4847 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4848 		sc->phy.release(sc);
   4849 		break;
   4850 	case WM_T_ICH8:
   4851 	case WM_T_ICH9:
   4852 	case WM_T_ICH10:
   4853 	case WM_T_PCH:
   4854 	case WM_T_PCH2:
   4855 	case WM_T_PCH_LPT:
   4856 	case WM_T_PCH_SPT:
   4857 	case WM_T_PCH_CNP:
   4858 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4859 		if (wm_phy_resetisblocked(sc) == false) {
   4860 			/*
   4861 			 * Gate automatic PHY configuration by hardware on
   4862 			 * non-managed 82579
   4863 			 */
   4864 			if ((sc->sc_type == WM_T_PCH2)
   4865 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4866 				== 0))
   4867 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4868 
   4869 			reg |= CTRL_PHY_RESET;
   4870 			phy_reset = 1;
   4871 		} else
   4872 			printf("XXX reset is blocked!!!\n");
   4873 		sc->phy.acquire(sc);
   4874 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4875 		/* Don't insert a completion barrier during reset */
   4876 		delay(20*1000);
   4877 		mutex_exit(sc->sc_ich_phymtx);
   4878 		break;
   4879 	case WM_T_82580:
   4880 	case WM_T_I350:
   4881 	case WM_T_I354:
   4882 	case WM_T_I210:
   4883 	case WM_T_I211:
   4884 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4885 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4886 			CSR_WRITE_FLUSH(sc);
   4887 		delay(5000);
   4888 		break;
   4889 	case WM_T_82542_2_0:
   4890 	case WM_T_82542_2_1:
   4891 	case WM_T_82543:
   4892 	case WM_T_82540:
   4893 	case WM_T_82545:
   4894 	case WM_T_82546:
   4895 	case WM_T_82571:
   4896 	case WM_T_82572:
   4897 	case WM_T_82573:
   4898 	case WM_T_82574:
   4899 	case WM_T_82575:
   4900 	case WM_T_82576:
   4901 	case WM_T_82583:
   4902 	default:
   4903 		/* Everything else can safely use the documented method. */
   4904 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4905 		break;
   4906 	}
   4907 
   4908 	/* Must release the MDIO ownership after MAC reset */
   4909 	switch (sc->sc_type) {
   4910 	case WM_T_82573:
   4911 	case WM_T_82574:
   4912 	case WM_T_82583:
   4913 		if (error == 0)
   4914 			wm_put_hw_semaphore_82573(sc);
   4915 		break;
   4916 	default:
   4917 		break;
   4918 	}
   4919 
    4920 	/* Set PHY Config Counter to 50 msec */
   4921 	if (sc->sc_type == WM_T_PCH2) {
   4922 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4923 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4924 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4925 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4926 	}
   4927 
   4928 	if (phy_reset != 0)
   4929 		wm_get_cfg_done(sc);
   4930 
   4931 	/* reload EEPROM */
   4932 	switch (sc->sc_type) {
   4933 	case WM_T_82542_2_0:
   4934 	case WM_T_82542_2_1:
   4935 	case WM_T_82543:
   4936 	case WM_T_82544:
   4937 		delay(10);
   4938 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4939 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4940 		CSR_WRITE_FLUSH(sc);
   4941 		delay(2000);
   4942 		break;
   4943 	case WM_T_82540:
   4944 	case WM_T_82545:
   4945 	case WM_T_82545_3:
   4946 	case WM_T_82546:
   4947 	case WM_T_82546_3:
   4948 		delay(5*1000);
   4949 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4950 		break;
   4951 	case WM_T_82541:
   4952 	case WM_T_82541_2:
   4953 	case WM_T_82547:
   4954 	case WM_T_82547_2:
   4955 		delay(20000);
   4956 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4957 		break;
   4958 	case WM_T_82571:
   4959 	case WM_T_82572:
   4960 	case WM_T_82573:
   4961 	case WM_T_82574:
   4962 	case WM_T_82583:
   4963 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4964 			delay(10);
   4965 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4966 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4967 			CSR_WRITE_FLUSH(sc);
   4968 		}
   4969 		/* check EECD_EE_AUTORD */
   4970 		wm_get_auto_rd_done(sc);
   4971 		/*
    4972 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   4973 		 * is set.
   4974 		 */
   4975 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4976 		    || (sc->sc_type == WM_T_82583))
   4977 			delay(25*1000);
   4978 		break;
   4979 	case WM_T_82575:
   4980 	case WM_T_82576:
   4981 	case WM_T_82580:
   4982 	case WM_T_I350:
   4983 	case WM_T_I354:
   4984 	case WM_T_I210:
   4985 	case WM_T_I211:
   4986 	case WM_T_80003:
   4987 		/* check EECD_EE_AUTORD */
   4988 		wm_get_auto_rd_done(sc);
   4989 		break;
   4990 	case WM_T_ICH8:
   4991 	case WM_T_ICH9:
   4992 	case WM_T_ICH10:
   4993 	case WM_T_PCH:
   4994 	case WM_T_PCH2:
   4995 	case WM_T_PCH_LPT:
   4996 	case WM_T_PCH_SPT:
   4997 	case WM_T_PCH_CNP:
   4998 		break;
   4999 	default:
   5000 		panic("%s: unknown type\n", __func__);
   5001 	}
   5002 
   5003 	/* Check whether EEPROM is present or not */
   5004 	switch (sc->sc_type) {
   5005 	case WM_T_82575:
   5006 	case WM_T_82576:
   5007 	case WM_T_82580:
   5008 	case WM_T_I350:
   5009 	case WM_T_I354:
   5010 	case WM_T_ICH8:
   5011 	case WM_T_ICH9:
   5012 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5013 			/* Not found */
   5014 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5015 			if (sc->sc_type == WM_T_82575)
   5016 				wm_reset_init_script_82575(sc);
   5017 		}
   5018 		break;
   5019 	default:
   5020 		break;
   5021 	}
   5022 
   5023 	if (phy_reset != 0)
   5024 		wm_phy_post_reset(sc);
   5025 
   5026 	if ((sc->sc_type == WM_T_82580)
   5027 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5028 		/* clear global device reset status bit */
   5029 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5030 	}
   5031 
   5032 	/* Clear any pending interrupt events. */
   5033 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5034 	reg = CSR_READ(sc, WMREG_ICR);
   5035 	if (wm_is_using_msix(sc)) {
   5036 		if (sc->sc_type != WM_T_82574) {
   5037 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5038 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5039 		} else
   5040 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5041 	}
   5042 
   5043 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5044 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5045 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5046 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5047 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5048 		reg |= KABGTXD_BGSQLBIAS;
   5049 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5050 	}
   5051 
   5052 	/* reload sc_ctrl */
   5053 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5054 
   5055 	wm_set_eee(sc);
   5056 
   5057 	/*
   5058 	 * For PCH, this write will make sure that any noise will be detected
   5059 	 * as a CRC error and be dropped rather than show up as a bad packet
    5060 	 * to the DMA engine.
   5061 	 */
   5062 	if (sc->sc_type == WM_T_PCH)
   5063 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5064 
   5065 	if (sc->sc_type >= WM_T_82544)
   5066 		CSR_WRITE(sc, WMREG_WUC, 0);
   5067 
   5068 	if (sc->sc_type < WM_T_82575)
   5069 		wm_disable_aspm(sc); /* Workaround for some chips */
   5070 
   5071 	wm_reset_mdicnfg_82580(sc);
   5072 
   5073 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5074 		wm_pll_workaround_i210(sc);
   5075 
   5076 	if (sc->sc_type == WM_T_80003) {
   5077 		/* default to TRUE to enable the MDIC W/A */
   5078 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5079 
   5080 		rv = wm_kmrn_readreg(sc,
   5081 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5082 		if (rv == 0) {
   5083 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5084 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5085 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5086 			else
   5087 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5088 		}
   5089 	}
   5090 }
   5091 
   5092 /*
   5093  * wm_add_rxbuf:
   5094  *
    5095  *	Add a receive buffer to the indicated descriptor.
   5096  */
   5097 static int
   5098 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5099 {
   5100 	struct wm_softc *sc = rxq->rxq_sc;
   5101 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5102 	struct mbuf *m;
   5103 	int error;
   5104 
   5105 	KASSERT(mutex_owned(rxq->rxq_lock));
   5106 
   5107 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5108 	if (m == NULL)
   5109 		return ENOBUFS;
   5110 
   5111 	MCLGET(m, M_DONTWAIT);
   5112 	if ((m->m_flags & M_EXT) == 0) {
   5113 		m_freem(m);
   5114 		return ENOBUFS;
   5115 	}
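
         	/*
         	 * (MCLGET attaches an external cluster to the mbuf; if M_EXT
         	 * is still clear at this point, the cluster allocation failed
         	 * and only the header mbuf from MGETHDR needed freeing, as
         	 * done above.)
         	 */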
   5116 
   5117 	if (rxs->rxs_mbuf != NULL)
   5118 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5119 
   5120 	rxs->rxs_mbuf = m;
   5121 
   5122 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5123 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5124 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5125 	if (error) {
   5126 		/* XXX XXX XXX */
   5127 		aprint_error_dev(sc->sc_dev,
   5128 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5129 		panic("wm_add_rxbuf");
   5130 	}
   5131 
   5132 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5133 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5134 
   5135 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5136 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5137 			wm_init_rxdesc(rxq, idx);
   5138 	} else
   5139 		wm_init_rxdesc(rxq, idx);
   5140 
   5141 	return 0;
   5142 }
   5143 
   5144 /*
   5145  * wm_rxdrain:
   5146  *
   5147  *	Drain the receive queue.
   5148  */
   5149 static void
   5150 wm_rxdrain(struct wm_rxqueue *rxq)
   5151 {
   5152 	struct wm_softc *sc = rxq->rxq_sc;
   5153 	struct wm_rxsoft *rxs;
   5154 	int i;
   5155 
   5156 	KASSERT(mutex_owned(rxq->rxq_lock));
   5157 
   5158 	for (i = 0; i < WM_NRXDESC; i++) {
   5159 		rxs = &rxq->rxq_soft[i];
   5160 		if (rxs->rxs_mbuf != NULL) {
   5161 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5162 			m_freem(rxs->rxs_mbuf);
   5163 			rxs->rxs_mbuf = NULL;
   5164 		}
   5165 	}
   5166 }
   5167 
   5168 /*
    5169  * Set up registers for RSS.
    5170  *
    5171  * XXX no VMDq support yet.
   5172  */
   5173 static void
   5174 wm_init_rss(struct wm_softc *sc)
   5175 {
   5176 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5177 	int i;
   5178 
   5179 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5180 
   5181 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5182 		int qid, reta_ent;
   5183 
   5184 		qid  = i % sc->sc_nqueues;
   5185 		switch (sc->sc_type) {
   5186 		case WM_T_82574:
   5187 			reta_ent = __SHIFTIN(qid,
   5188 			    RETA_ENT_QINDEX_MASK_82574);
   5189 			break;
   5190 		case WM_T_82575:
   5191 			reta_ent = __SHIFTIN(qid,
   5192 			    RETA_ENT_QINDEX1_MASK_82575);
   5193 			break;
   5194 		default:
   5195 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5196 			break;
   5197 		}
   5198 
   5199 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5200 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5201 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5202 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5203 	}
   5204 
   5205 	rss_getkey((uint8_t *)rss_key);
   5206 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5207 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5208 
   5209 	if (sc->sc_type == WM_T_82574)
   5210 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5211 	else
   5212 		mrqc = MRQC_ENABLE_RSS_MQ;
   5213 
   5214 	/*
    5215 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5216 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5217 	 */
   5218 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5219 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5220 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5221 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5222 
   5223 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5224 }
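
         /*
          * A sketch of how the tables programmed above steer packets
          * (standard RSS behavior; the hash-to-index width is an
          * assumption): the MAC computes a Toeplitz hash over the packet's
          * address/port tuple using the RSSRK key, the hash's low bits
          * index the RETA written above, and the selected entry names the
          * Rx queue. With sc_nqueues == 4, the i % sc_nqueues fill yields
          * RETA = 0,1,2,3,0,1,... so flows spread round-robin across the
          * queues.
          */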
   5225 
   5226 /*
    5227  * Adjust the numbers of TX and RX queues which the system actually uses.
    5228  *
    5229  * The numbers are affected by the parameters below:
    5230  *     - The number of hardware queues
   5231  *     - The number of MSI-X vectors (= "nvectors" argument)
   5232  *     - ncpu
   5233  */
   5234 static void
   5235 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5236 {
   5237 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5238 
   5239 	if (nvectors < 2) {
   5240 		sc->sc_nqueues = 1;
   5241 		return;
   5242 	}
   5243 
   5244 	switch (sc->sc_type) {
   5245 	case WM_T_82572:
   5246 		hw_ntxqueues = 2;
   5247 		hw_nrxqueues = 2;
   5248 		break;
   5249 	case WM_T_82574:
   5250 		hw_ntxqueues = 2;
   5251 		hw_nrxqueues = 2;
   5252 		break;
   5253 	case WM_T_82575:
   5254 		hw_ntxqueues = 4;
   5255 		hw_nrxqueues = 4;
   5256 		break;
   5257 	case WM_T_82576:
   5258 		hw_ntxqueues = 16;
   5259 		hw_nrxqueues = 16;
   5260 		break;
   5261 	case WM_T_82580:
   5262 	case WM_T_I350:
   5263 	case WM_T_I354:
   5264 		hw_ntxqueues = 8;
   5265 		hw_nrxqueues = 8;
   5266 		break;
   5267 	case WM_T_I210:
   5268 		hw_ntxqueues = 4;
   5269 		hw_nrxqueues = 4;
   5270 		break;
   5271 	case WM_T_I211:
   5272 		hw_ntxqueues = 2;
   5273 		hw_nrxqueues = 2;
   5274 		break;
   5275 		/*
    5276 		 * As the Ethernet controllers below do not support MSI-X,
    5277 		 * this driver does not use multiqueue on them.
   5278 		 *     - WM_T_80003
   5279 		 *     - WM_T_ICH8
   5280 		 *     - WM_T_ICH9
   5281 		 *     - WM_T_ICH10
   5282 		 *     - WM_T_PCH
   5283 		 *     - WM_T_PCH2
   5284 		 *     - WM_T_PCH_LPT
   5285 		 */
   5286 	default:
   5287 		hw_ntxqueues = 1;
   5288 		hw_nrxqueues = 1;
   5289 		break;
   5290 	}
   5291 
   5292 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5293 
   5294 	/*
    5295 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    5296 	 * limit the number of queues actually used.
   5297 	 */
   5298 	if (nvectors < hw_nqueues + 1)
   5299 		sc->sc_nqueues = nvectors - 1;
   5300 	else
   5301 		sc->sc_nqueues = hw_nqueues;
   5302 
   5303 	/*
    5304 	 * Since more queues than CPUs cannot improve scaling, we limit
    5305 	 * the number of queues actually used.
   5306 	 */
   5307 	if (ncpu < sc->sc_nqueues)
   5308 		sc->sc_nqueues = ncpu;
   5309 }
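
         /*
          * A hypothetical worked example of the clamping above: an 82576
          * (hw_ntxqueues = hw_nrxqueues = 16) probed with nvectors = 5 on
          * an 8-CPU machine gives hw_nqueues = 16, then
          * sc_nqueues = nvectors - 1 = 4 (one vector is reserved for the
          * link interrupt), and the final ncpu clamp leaves it at 4.
          */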
   5310 
   5311 static inline bool
   5312 wm_is_using_msix(struct wm_softc *sc)
   5313 {
   5314 
   5315 	return (sc->sc_nintrs > 1);
   5316 }
   5317 
   5318 static inline bool
   5319 wm_is_using_multiqueue(struct wm_softc *sc)
   5320 {
   5321 
   5322 	return (sc->sc_nqueues > 1);
   5323 }
   5324 
   5325 static int
   5326 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5327 {
   5328 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5329 	wmq->wmq_id = qidx;
   5330 	wmq->wmq_intr_idx = intr_idx;
   5331 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5332 #ifdef WM_MPSAFE
   5333 	    | SOFTINT_MPSAFE
   5334 #endif
   5335 	    , wm_handle_queue, wmq);
   5336 	if (wmq->wmq_si != NULL)
   5337 		return 0;
   5338 
   5339 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5340 	    wmq->wmq_id);
   5341 
   5342 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5343 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5344 	return ENOMEM;
   5345 }
   5346 
   5347 /*
    5348  * Both single-interrupt MSI and INTx can use this function.
   5349  */
   5350 static int
   5351 wm_setup_legacy(struct wm_softc *sc)
   5352 {
   5353 	pci_chipset_tag_t pc = sc->sc_pc;
   5354 	const char *intrstr = NULL;
   5355 	char intrbuf[PCI_INTRSTR_LEN];
   5356 	int error;
   5357 
   5358 	error = wm_alloc_txrx_queues(sc);
   5359 	if (error) {
   5360 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5361 		    error);
   5362 		return ENOMEM;
   5363 	}
   5364 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5365 	    sizeof(intrbuf));
   5366 #ifdef WM_MPSAFE
   5367 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5368 #endif
   5369 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5370 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5371 	if (sc->sc_ihs[0] == NULL) {
   5372 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5373 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5374 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5375 		return ENOMEM;
   5376 	}
   5377 
   5378 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5379 	sc->sc_nintrs = 1;
   5380 
   5381 	return wm_softint_establish(sc, 0, 0);
   5382 }
   5383 
   5384 static int
   5385 wm_setup_msix(struct wm_softc *sc)
   5386 {
   5387 	void *vih;
   5388 	kcpuset_t *affinity;
   5389 	int qidx, error, intr_idx, txrx_established;
   5390 	pci_chipset_tag_t pc = sc->sc_pc;
   5391 	const char *intrstr = NULL;
   5392 	char intrbuf[PCI_INTRSTR_LEN];
   5393 	char intr_xname[INTRDEVNAMEBUF];
   5394 
   5395 	if (sc->sc_nqueues < ncpu) {
   5396 		/*
    5397 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5398 		 * interrupts starts from CPU#1.
   5399 		 */
   5400 		sc->sc_affinity_offset = 1;
   5401 	} else {
   5402 		/*
    5403 		 * In this case, this device uses all CPUs, so we unify the
    5404 		 * affinity cpu_index with the MSI-X vector for readability.
   5405 		 */
   5406 		sc->sc_affinity_offset = 0;
   5407 	}
   5408 
   5409 	error = wm_alloc_txrx_queues(sc);
   5410 	if (error) {
   5411 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5412 		    error);
   5413 		return ENOMEM;
   5414 	}
   5415 
   5416 	kcpuset_create(&affinity, false);
   5417 	intr_idx = 0;
   5418 
   5419 	/*
   5420 	 * TX and RX
   5421 	 */
   5422 	txrx_established = 0;
   5423 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5424 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5425 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5426 
   5427 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5428 		    sizeof(intrbuf));
   5429 #ifdef WM_MPSAFE
   5430 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5431 		    PCI_INTR_MPSAFE, true);
   5432 #endif
   5433 		memset(intr_xname, 0, sizeof(intr_xname));
   5434 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5435 		    device_xname(sc->sc_dev), qidx);
   5436 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5437 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5438 		if (vih == NULL) {
   5439 			aprint_error_dev(sc->sc_dev,
   5440 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5441 			    intrstr ? " at " : "",
   5442 			    intrstr ? intrstr : "");
   5443 
   5444 			goto fail;
   5445 		}
   5446 		kcpuset_zero(affinity);
   5447 		/* Round-robin affinity */
   5448 		kcpuset_set(affinity, affinity_to);
   5449 		error = interrupt_distribute(vih, affinity, NULL);
   5450 		if (error == 0) {
   5451 			aprint_normal_dev(sc->sc_dev,
   5452 			    "for TX and RX interrupting at %s affinity to %u\n",
   5453 			    intrstr, affinity_to);
   5454 		} else {
   5455 			aprint_normal_dev(sc->sc_dev,
   5456 			    "for TX and RX interrupting at %s\n", intrstr);
   5457 		}
   5458 		sc->sc_ihs[intr_idx] = vih;
   5459 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5460 			goto fail;
   5461 		txrx_established++;
   5462 		intr_idx++;
   5463 	}
   5464 
   5465 	/*
   5466 	 * LINK
   5467 	 */
   5468 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5469 	    sizeof(intrbuf));
   5470 #ifdef WM_MPSAFE
   5471 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5472 #endif
   5473 	memset(intr_xname, 0, sizeof(intr_xname));
   5474 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5475 	    device_xname(sc->sc_dev));
   5476 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5477 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5478 	if (vih == NULL) {
   5479 		aprint_error_dev(sc->sc_dev,
   5480 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5481 		    intrstr ? " at " : "",
   5482 		    intrstr ? intrstr : "");
   5483 
   5484 		goto fail;
   5485 	}
    5486 	/* Keep the default affinity for the LINK interrupt */
   5487 	aprint_normal_dev(sc->sc_dev,
   5488 	    "for LINK interrupting at %s\n", intrstr);
   5489 	sc->sc_ihs[intr_idx] = vih;
   5490 	sc->sc_link_intr_idx = intr_idx;
   5491 
   5492 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5493 	kcpuset_destroy(affinity);
   5494 	return 0;
   5495 
   5496  fail:
   5497 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5498 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5499 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5500 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5501 	}
   5502 
   5503 	kcpuset_destroy(affinity);
   5504 	return ENOMEM;
   5505 }
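
         /*
          * Summarizing the layout established above, e.g. with
          * sc_nqueues == 4: MSI-X vectors 0..3 service the per-queue
          * TXRX0..TXRX3 handlers with round-robin CPU affinity starting
          * at sc_affinity_offset, vector 4 is the LINK interrupt with
          * default affinity, and sc_nintrs ends up as sc_nqueues + 1 = 5.
          */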
   5506 
   5507 static void
   5508 wm_unset_stopping_flags(struct wm_softc *sc)
   5509 {
   5510 	int i;
   5511 
   5512 	KASSERT(WM_CORE_LOCKED(sc));
   5513 
   5514 	/*
   5515 	 * must unset stopping flags in ascending order.
   5516 	 */
   5517 	for (i = 0; i < sc->sc_nqueues; i++) {
   5518 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5519 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5520 
   5521 		mutex_enter(txq->txq_lock);
   5522 		txq->txq_stopping = false;
   5523 		mutex_exit(txq->txq_lock);
   5524 
   5525 		mutex_enter(rxq->rxq_lock);
   5526 		rxq->rxq_stopping = false;
   5527 		mutex_exit(rxq->rxq_lock);
   5528 	}
   5529 
   5530 	sc->sc_core_stopping = false;
   5531 }
   5532 
   5533 static void
   5534 wm_set_stopping_flags(struct wm_softc *sc)
   5535 {
   5536 	int i;
   5537 
   5538 	KASSERT(WM_CORE_LOCKED(sc));
   5539 
   5540 	sc->sc_core_stopping = true;
   5541 
   5542 	/*
   5543 	 * must set stopping flags in ascending order.
   5544 	 */
   5545 	for (i = 0; i < sc->sc_nqueues; i++) {
   5546 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5547 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5548 
   5549 		mutex_enter(rxq->rxq_lock);
   5550 		rxq->rxq_stopping = true;
   5551 		mutex_exit(rxq->rxq_lock);
   5552 
   5553 		mutex_enter(txq->txq_lock);
   5554 		txq->txq_stopping = true;
   5555 		mutex_exit(txq->txq_lock);
   5556 	}
   5557 }
   5558 
   5559 /*
    5560  * Write the interrupt interval value to the ITR or EITR register.
   5561  */
   5562 static void
   5563 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5564 {
   5565 
   5566 	if (!wmq->wmq_set_itr)
   5567 		return;
   5568 
   5569 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5570 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5571 
   5572 		/*
    5573 		 * The 82575 doesn't have the CNT_INGR field,
    5574 		 * so overwrite the counter field in software.
   5575 		 */
   5576 		if (sc->sc_type == WM_T_82575)
   5577 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5578 		else
   5579 			eitr |= EITR_CNT_INGR;
   5580 
   5581 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5582 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5583 		/*
    5584 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5585 		 * the multiqueue function with MSI-X.
   5586 		 */
   5587 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5588 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5589 	} else {
   5590 		KASSERT(wmq->wmq_id == 0);
   5591 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5592 	}
   5593 
   5594 	wmq->wmq_set_itr = false;
   5595 }
   5596 
   5597 /*
   5598  * TODO
    5599  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5600  * but it does not fit wm(4), so AIM stays disabled until we find an
    5601  * appropriate way to calculate the ITR.
   5602  */
   5603 /*
    5604  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5605  * write to the register; this function does not write ITR/EITR itself.
   5606  */
   5607 static void
   5608 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5609 {
   5610 #ifdef NOTYET
   5611 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5612 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5613 	uint32_t avg_size = 0;
   5614 	uint32_t new_itr;
   5615 
   5616 	if (rxq->rxq_packets)
   5617 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5618 	if (txq->txq_packets)
   5619 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5620 
   5621 	if (avg_size == 0) {
   5622 		new_itr = 450; /* restore default value */
   5623 		goto out;
   5624 	}
   5625 
   5626 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5627 	avg_size += 24;
   5628 
   5629 	/* Don't starve jumbo frames */
   5630 	avg_size = uimin(avg_size, 3000);
   5631 
   5632 	/* Give a little boost to mid-size frames */
   5633 	if ((avg_size > 300) && (avg_size < 1200))
   5634 		new_itr = avg_size / 3;
   5635 	else
   5636 		new_itr = avg_size / 2;
   5637 
   5638 out:
   5639 	/*
    5640 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5641 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5642 	 */
   5643 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5644 		new_itr *= 4;
   5645 
   5646 	if (new_itr != wmq->wmq_itr) {
   5647 		wmq->wmq_itr = new_itr;
   5648 		wmq->wmq_set_itr = true;
   5649 	} else
   5650 		wmq->wmq_set_itr = false;
   5651 
   5652 	rxq->rxq_packets = 0;
   5653 	rxq->rxq_bytes = 0;
   5654 	txq->txq_packets = 0;
   5655 	txq->txq_bytes = 0;
   5656 #endif
   5657 }
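
         /*
          * A worked pass through the disabled calculation above, assuming
          * an average frame of 576 bytes: avg_size = 576 + 24 = 600, which
          * falls in the mid-size boost range, so new_itr = 600 / 3 = 200;
          * everywhere except the NEWQUEUE 82575 the value is then scaled
          * by 4 to 800 before wm_itrs_writereg() latches it.
          */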
   5658 
   5659 /*
   5660  * wm_init:		[ifnet interface function]
   5661  *
   5662  *	Initialize the interface.
   5663  */
   5664 static int
   5665 wm_init(struct ifnet *ifp)
   5666 {
   5667 	struct wm_softc *sc = ifp->if_softc;
   5668 	int ret;
   5669 
   5670 	WM_CORE_LOCK(sc);
   5671 	ret = wm_init_locked(ifp);
   5672 	WM_CORE_UNLOCK(sc);
   5673 
   5674 	return ret;
   5675 }
   5676 
   5677 static int
   5678 wm_init_locked(struct ifnet *ifp)
   5679 {
   5680 	struct wm_softc *sc = ifp->if_softc;
   5681 	struct ethercom *ec = &sc->sc_ethercom;
   5682 	int i, j, trynum, error = 0;
   5683 	uint32_t reg;
   5684 
   5685 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5686 		device_xname(sc->sc_dev), __func__));
   5687 	KASSERT(WM_CORE_LOCKED(sc));
   5688 
   5689 	/*
    5690 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5691 	 * There is a small but measurable benefit to avoiding the adjustment
    5692 	 * of the descriptor so that the headers are aligned, for normal MTU,
   5693 	 * on such platforms.  One possibility is that the DMA itself is
   5694 	 * slightly more efficient if the front of the entire packet (instead
   5695 	 * of the front of the headers) is aligned.
   5696 	 *
   5697 	 * Note we must always set align_tweak to 0 if we are using
   5698 	 * jumbo frames.
   5699 	 */
   5700 #ifdef __NO_STRICT_ALIGNMENT
   5701 	sc->sc_align_tweak = 0;
   5702 #else
   5703 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5704 		sc->sc_align_tweak = 0;
   5705 	else
   5706 		sc->sc_align_tweak = 2;
   5707 #endif /* __NO_STRICT_ALIGNMENT */
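
         	/*
         	 * Concretely (a sketch of the usual motivation for the
         	 * tweak): an Ethernet header is ETHER_HDR_LEN = 14 bytes, so
         	 * starting the frame 2 bytes into the buffer places the IP
         	 * header at offset 16, i.e. 4-byte aligned, which
         	 * strict-alignment platforms need for the stack's header
         	 * accesses.
         	 */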
   5708 
   5709 	/* Cancel any pending I/O. */
   5710 	wm_stop_locked(ifp, 0);
   5711 
   5712 	/* update statistics before reset */
   5713 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5714 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5715 
   5716 	/* PCH_SPT hardware workaround */
   5717 	if (sc->sc_type == WM_T_PCH_SPT)
   5718 		wm_flush_desc_rings(sc);
   5719 
   5720 	/* Reset the chip to a known state. */
   5721 	wm_reset(sc);
   5722 
   5723 	/*
    5724 	 * AMT-based hardware can now take control from firmware.
   5725 	 * Do this after reset.
   5726 	 */
   5727 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5728 		wm_get_hw_control(sc);
   5729 
   5730 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5731 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5732 		wm_legacy_irq_quirk_spt(sc);
   5733 
   5734 	/* Init hardware bits */
   5735 	wm_initialize_hardware_bits(sc);
   5736 
   5737 	/* Reset the PHY. */
   5738 	if (sc->sc_flags & WM_F_HAS_MII)
   5739 		wm_gmii_reset(sc);
   5740 
   5741 	if (sc->sc_type >= WM_T_ICH8) {
   5742 		reg = CSR_READ(sc, WMREG_GCR);
   5743 		/*
   5744 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5745 		 * default after reset.
   5746 		 */
   5747 		if (sc->sc_type == WM_T_ICH8)
   5748 			reg |= GCR_NO_SNOOP_ALL;
   5749 		else
   5750 			reg &= ~GCR_NO_SNOOP_ALL;
   5751 		CSR_WRITE(sc, WMREG_GCR, reg);
   5752 	}
   5753 	if ((sc->sc_type >= WM_T_ICH8)
   5754 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5755 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5756 
   5757 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5758 		reg |= CTRL_EXT_RO_DIS;
   5759 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5760 	}
   5761 
   5762 	/* Calculate (E)ITR value */
   5763 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5764 		/*
   5765 		 * For NEWQUEUE's EITR (except for 82575).
    5766 		 * The 82575's EITR should be set to the same throttling value
    5767 		 * as other old controllers' ITR because the interrupt/sec
    5768 		 * calculation is the same, i.e. 1,000,000,000 / (N * 256).
    5769 		 *
    5770 		 * The 82574's EITR should be set to the same value as ITR.
    5771 		 *
    5772 		 * For N interrupts/sec, set this value to:
    5773 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5774 		 */
   5775 		sc->sc_itr_init = 450;
   5776 	} else if (sc->sc_type >= WM_T_82543) {
   5777 		/*
    5778 		 * Set up the interrupt throttling register (units of 256ns).
    5779 		 * Note that a footnote in Intel's documentation says this
    5780 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    5781 		 * or 10Mbit mode.  Empirically, this also appears to be
    5782 		 * true for the 1024ns units of the other interrupt-related
    5783 		 * timer registers -- so, really, we ought to divide this
    5784 		 * value by 4 when the link speed is low.
   5785 		 *
   5786 		 * XXX implement this division at link speed change!
   5787 		 */
   5788 
   5789 		/*
   5790 		 * For N interrupts/sec, set this value to:
   5791 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5792 		 * absolute and packet timer values to this value
   5793 		 * divided by 4 to get "simple timer" behavior.
   5794 		 */
   5795 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5796 	}
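
         	/*
         	 * Checking the arithmetic in the comments above: with 256ns
         	 * units, an ITR value of 1500 gives
         	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec,
         	 * matching the annotation; by the same reading, the NEWQUEUE
         	 * EITR value of 450 corresponds to roughly
         	 * 1,000,000 / 450 ~= 2222 interrupts/sec.
         	 */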
   5797 
   5798 	error = wm_init_txrx_queues(sc);
   5799 	if (error)
   5800 		goto out;
   5801 
   5802 	/*
   5803 	 * Clear out the VLAN table -- we don't use it (yet).
   5804 	 */
   5805 	CSR_WRITE(sc, WMREG_VET, 0);
   5806 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5807 		trynum = 10; /* Due to hw errata */
   5808 	else
   5809 		trynum = 1;
   5810 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5811 		for (j = 0; j < trynum; j++)
   5812 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5813 
   5814 	/*
   5815 	 * Set up flow-control parameters.
   5816 	 *
   5817 	 * XXX Values could probably stand some tuning.
   5818 	 */
   5819 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5820 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5821 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5822 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5823 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5824 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5825 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5826 	}
   5827 
   5828 	sc->sc_fcrtl = FCRTL_DFLT;
   5829 	if (sc->sc_type < WM_T_82543) {
   5830 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5831 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5832 	} else {
   5833 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5834 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5835 	}
   5836 
   5837 	if (sc->sc_type == WM_T_80003)
   5838 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5839 	else
   5840 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5841 
   5842 	/* Writes the control register. */
   5843 	wm_set_vlan(sc);
   5844 
   5845 	if (sc->sc_flags & WM_F_HAS_MII) {
   5846 		uint16_t kmreg;
   5847 
   5848 		switch (sc->sc_type) {
   5849 		case WM_T_80003:
   5850 		case WM_T_ICH8:
   5851 		case WM_T_ICH9:
   5852 		case WM_T_ICH10:
   5853 		case WM_T_PCH:
   5854 		case WM_T_PCH2:
   5855 		case WM_T_PCH_LPT:
   5856 		case WM_T_PCH_SPT:
   5857 		case WM_T_PCH_CNP:
   5858 			/*
    5859 			 * Set the MAC to wait the maximum time between each
   5860 			 * iteration and increase the max iterations when
    5861 			 * polling the PHY; this fixes erroneous timeouts at
   5862 			 * 10Mbps.
   5863 			 */
   5864 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5865 			    0xFFFF);
   5866 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5867 			    &kmreg);
   5868 			kmreg |= 0x3F;
   5869 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5870 			    kmreg);
   5871 			break;
   5872 		default:
   5873 			break;
   5874 		}
   5875 
   5876 		if (sc->sc_type == WM_T_80003) {
   5877 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5878 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5879 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5880 
    5881 			/* Bypass RX and TX FIFOs */
   5882 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5883 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5884 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5885 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5886 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5887 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5888 		}
   5889 	}
   5890 #if 0
   5891 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5892 #endif
   5893 
   5894 	/* Set up checksum offload parameters. */
   5895 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5896 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5897 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5898 		reg |= RXCSUM_IPOFL;
   5899 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5900 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5901 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5902 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5903 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5904 
   5905 	/* Set registers about MSI-X */
   5906 	if (wm_is_using_msix(sc)) {
   5907 		uint32_t ivar;
   5908 		struct wm_queue *wmq;
   5909 		int qid, qintr_idx;
   5910 
   5911 		if (sc->sc_type == WM_T_82575) {
   5912 			/* Interrupt control */
   5913 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5914 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5915 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5916 
   5917 			/* TX and RX */
   5918 			for (i = 0; i < sc->sc_nqueues; i++) {
   5919 				wmq = &sc->sc_queue[i];
   5920 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5921 				    EITR_TX_QUEUE(wmq->wmq_id)
   5922 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5923 			}
   5924 			/* Link status */
   5925 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5926 			    EITR_OTHER);
   5927 		} else if (sc->sc_type == WM_T_82574) {
   5928 			/* Interrupt control */
   5929 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5930 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5931 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5932 
   5933 			/*
    5934 			 * Work around an issue with spurious interrupts
    5935 			 * in MSI-X mode.
    5936 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5937 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5938 			 */
   5939 			reg = CSR_READ(sc, WMREG_RFCTL);
   5940 			reg |= WMREG_RFCTL_ACKDIS;
   5941 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5942 
   5943 			ivar = 0;
   5944 			/* TX and RX */
   5945 			for (i = 0; i < sc->sc_nqueues; i++) {
   5946 				wmq = &sc->sc_queue[i];
   5947 				qid = wmq->wmq_id;
   5948 				qintr_idx = wmq->wmq_intr_idx;
   5949 
   5950 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5951 				    IVAR_TX_MASK_Q_82574(qid));
   5952 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5953 				    IVAR_RX_MASK_Q_82574(qid));
   5954 			}
   5955 			/* Link status */
   5956 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5957 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5958 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5959 		} else {
   5960 			/* Interrupt control */
   5961 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5962 			    | GPIE_EIAME | GPIE_PBA);
   5963 
   5964 			switch (sc->sc_type) {
   5965 			case WM_T_82580:
   5966 			case WM_T_I350:
   5967 			case WM_T_I354:
   5968 			case WM_T_I210:
   5969 			case WM_T_I211:
   5970 				/* TX and RX */
   5971 				for (i = 0; i < sc->sc_nqueues; i++) {
   5972 					wmq = &sc->sc_queue[i];
   5973 					qid = wmq->wmq_id;
   5974 					qintr_idx = wmq->wmq_intr_idx;
   5975 
   5976 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5977 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5978 					ivar |= __SHIFTIN((qintr_idx
   5979 						| IVAR_VALID),
   5980 					    IVAR_TX_MASK_Q(qid));
   5981 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5982 					ivar |= __SHIFTIN((qintr_idx
   5983 						| IVAR_VALID),
   5984 					    IVAR_RX_MASK_Q(qid));
   5985 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5986 				}
   5987 				break;
   5988 			case WM_T_82576:
   5989 				/* TX and RX */
   5990 				for (i = 0; i < sc->sc_nqueues; i++) {
   5991 					wmq = &sc->sc_queue[i];
   5992 					qid = wmq->wmq_id;
   5993 					qintr_idx = wmq->wmq_intr_idx;
   5994 
   5995 					ivar = CSR_READ(sc,
   5996 					    WMREG_IVAR_Q_82576(qid));
   5997 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5998 					ivar |= __SHIFTIN((qintr_idx
   5999 						| IVAR_VALID),
   6000 					    IVAR_TX_MASK_Q_82576(qid));
   6001 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6002 					ivar |= __SHIFTIN((qintr_idx
   6003 						| IVAR_VALID),
   6004 					    IVAR_RX_MASK_Q_82576(qid));
   6005 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6006 					    ivar);
   6007 				}
   6008 				break;
   6009 			default:
   6010 				break;
   6011 			}
   6012 
   6013 			/* Link status */
   6014 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6015 			    IVAR_MISC_OTHER);
   6016 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6017 		}
   6018 
   6019 		if (wm_is_using_multiqueue(sc)) {
   6020 			wm_init_rss(sc);
   6021 
    6022 			/*
    6023 			 * NOTE: Receive Full-Packet Checksum Offload
    6024 			 * is mutually exclusive with Multiqueue. However,
    6025 			 * this is not the same as TCP/IP checksums, which
    6026 			 * still work.
    6027 			 */
   6028 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6029 			reg |= RXCSUM_PCSD;
   6030 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6031 		}
   6032 	}
   6033 
   6034 	/* Set up the interrupt registers. */
   6035 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6036 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6037 	    ICR_RXO | ICR_RXT0;
   6038 	if (wm_is_using_msix(sc)) {
   6039 		uint32_t mask;
   6040 		struct wm_queue *wmq;
   6041 
   6042 		switch (sc->sc_type) {
   6043 		case WM_T_82574:
   6044 			mask = 0;
   6045 			for (i = 0; i < sc->sc_nqueues; i++) {
   6046 				wmq = &sc->sc_queue[i];
   6047 				mask |= ICR_TXQ(wmq->wmq_id);
   6048 				mask |= ICR_RXQ(wmq->wmq_id);
   6049 			}
   6050 			mask |= ICR_OTHER;
   6051 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6052 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6053 			break;
   6054 		default:
   6055 			if (sc->sc_type == WM_T_82575) {
   6056 				mask = 0;
   6057 				for (i = 0; i < sc->sc_nqueues; i++) {
   6058 					wmq = &sc->sc_queue[i];
   6059 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6060 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6061 				}
   6062 				mask |= EITR_OTHER;
   6063 			} else {
   6064 				mask = 0;
   6065 				for (i = 0; i < sc->sc_nqueues; i++) {
   6066 					wmq = &sc->sc_queue[i];
   6067 					mask |= 1 << wmq->wmq_intr_idx;
   6068 				}
   6069 				mask |= 1 << sc->sc_link_intr_idx;
   6070 			}
   6071 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6072 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6073 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6074 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6075 			break;
   6076 		}
   6077 	} else
   6078 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6079 
   6080 	/* Set up the inter-packet gap. */
   6081 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6082 
   6083 	if (sc->sc_type >= WM_T_82543) {
   6084 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6085 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6086 			wm_itrs_writereg(sc, wmq);
   6087 		}
   6088 		/*
    6089 		 * Link interrupts occur much less frequently than TX
    6090 		 * and RX interrupts, so we don't tune the
    6091 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    6092 		 * FreeBSD's if_igb does.
   6093 		 */
   6094 	}
   6095 
   6096 	/* Set the VLAN ethernetype. */
   6097 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6098 
   6099 	/*
   6100 	 * Set up the transmit control register; we start out with
    6101 	 * a collision distance suitable for FDX, but update it when
   6102 	 * we resolve the media type.
   6103 	 */
   6104 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6105 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6106 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6107 	if (sc->sc_type >= WM_T_82571)
   6108 		sc->sc_tctl |= TCTL_MULR;
   6109 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6110 
   6111 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6112 		/* Write TDT after TCTL.EN is set. See the document. */
   6113 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6114 	}
   6115 
   6116 	if (sc->sc_type == WM_T_80003) {
   6117 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6118 		reg &= ~TCTL_EXT_GCEX_MASK;
   6119 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6120 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6121 	}
   6122 
   6123 	/* Set the media. */
   6124 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6125 		goto out;
   6126 
   6127 	/* Configure for OS presence */
   6128 	wm_init_manageability(sc);
   6129 
   6130 	/*
   6131 	 * Set up the receive control register; we actually program the
   6132 	 * register when we set the receive filter. Use multicast address
   6133 	 * offset type 0.
   6134 	 *
   6135 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6136 	 * don't enable that feature.
   6137 	 */
   6138 	sc->sc_mchash_type = 0;
   6139 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6140 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6141 
   6142 	/*
    6143 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   6144 	 */
   6145 	if (sc->sc_type == WM_T_82574)
   6146 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6147 
   6148 	/*
   6149 	 * The I350 has a bug where it always strips the CRC whether
    6150 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   6151 	 */
   6152 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6153 	    || (sc->sc_type == WM_T_I210))
   6154 		sc->sc_rctl |= RCTL_SECRC;
   6155 
   6156 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6157 	    && (ifp->if_mtu > ETHERMTU)) {
   6158 		sc->sc_rctl |= RCTL_LPE;
   6159 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6160 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6161 	}
   6162 
   6163 	if (MCLBYTES == 2048)
   6164 		sc->sc_rctl |= RCTL_2k;
   6165 	else {
   6166 		if (sc->sc_type >= WM_T_82543) {
   6167 			switch (MCLBYTES) {
   6168 			case 4096:
   6169 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6170 				break;
   6171 			case 8192:
   6172 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6173 				break;
   6174 			case 16384:
   6175 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6176 				break;
   6177 			default:
   6178 				panic("wm_init: MCLBYTES %d unsupported",
   6179 				    MCLBYTES);
   6180 				break;
   6181 			}
   6182 		} else
   6183 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6184 	}
   6185 
   6186 	/* Enable ECC */
   6187 	switch (sc->sc_type) {
   6188 	case WM_T_82571:
   6189 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6190 		reg |= PBA_ECC_CORR_EN;
   6191 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6192 		break;
   6193 	case WM_T_PCH_LPT:
   6194 	case WM_T_PCH_SPT:
   6195 	case WM_T_PCH_CNP:
   6196 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6197 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6198 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6199 
   6200 		sc->sc_ctrl |= CTRL_MEHE;
   6201 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6202 		break;
   6203 	default:
   6204 		break;
   6205 	}
   6206 
   6207 	/*
   6208 	 * Set the receive filter.
   6209 	 *
   6210 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6211 	 * the setting of RCTL.EN in wm_set_filter()
   6212 	 */
   6213 	wm_set_filter(sc);
   6214 
    6215 	/* On 82575 and later, set RDT only once RX is enabled. */
   6216 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6217 		int qidx;
   6218 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6219 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6220 			for (i = 0; i < WM_NRXDESC; i++) {
   6221 				mutex_enter(rxq->rxq_lock);
   6222 				wm_init_rxdesc(rxq, i);
   6223 				mutex_exit(rxq->rxq_lock);
   6224 
   6225 			}
   6226 		}
   6227 	}
   6228 
   6229 	wm_unset_stopping_flags(sc);
   6230 
   6231 	/* Start the one second link check clock. */
   6232 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6233 
   6234 	/* ...all done! */
   6235 	ifp->if_flags |= IFF_RUNNING;
   6236 	ifp->if_flags &= ~IFF_OACTIVE;
   6237 
   6238  out:
   6239 	/* Save last flags for the callback */
   6240 	sc->sc_if_flags = ifp->if_flags;
   6241 	sc->sc_ec_capenable = ec->ec_capenable;
   6242 	if (error)
   6243 		log(LOG_ERR, "%s: interface not running\n",
   6244 		    device_xname(sc->sc_dev));
   6245 	return error;
   6246 }
   6247 
   6248 /*
   6249  * wm_stop:		[ifnet interface function]
   6250  *
   6251  *	Stop transmission on the interface.
   6252  */
   6253 static void
   6254 wm_stop(struct ifnet *ifp, int disable)
   6255 {
   6256 	struct wm_softc *sc = ifp->if_softc;
   6257 
   6258 	WM_CORE_LOCK(sc);
   6259 	wm_stop_locked(ifp, disable);
   6260 	WM_CORE_UNLOCK(sc);
   6261 }
   6262 
   6263 static void
   6264 wm_stop_locked(struct ifnet *ifp, int disable)
   6265 {
   6266 	struct wm_softc *sc = ifp->if_softc;
   6267 	struct wm_txsoft *txs;
   6268 	int i, qidx;
   6269 
   6270 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6271 		device_xname(sc->sc_dev), __func__));
   6272 	KASSERT(WM_CORE_LOCKED(sc));
   6273 
   6274 	wm_set_stopping_flags(sc);
   6275 
   6276 	/* Stop the one second clock. */
   6277 	callout_stop(&sc->sc_tick_ch);
   6278 
   6279 	/* Stop the 82547 Tx FIFO stall check timer. */
   6280 	if (sc->sc_type == WM_T_82547)
   6281 		callout_stop(&sc->sc_txfifo_ch);
   6282 
   6283 	if (sc->sc_flags & WM_F_HAS_MII) {
   6284 		/* Down the MII. */
   6285 		mii_down(&sc->sc_mii);
   6286 	} else {
   6287 #if 0
   6288 		/* Should we clear PHY's status properly? */
   6289 		wm_reset(sc);
   6290 #endif
   6291 	}
   6292 
   6293 	/* Stop the transmit and receive processes. */
   6294 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6295 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6296 	sc->sc_rctl &= ~RCTL_EN;
   6297 
   6298 	/*
   6299 	 * Clear the interrupt mask to ensure the device cannot assert its
   6300 	 * interrupt line.
   6301 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6302 	 * service any currently pending or shared interrupt.
   6303 	 */
   6304 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6305 	sc->sc_icr = 0;
   6306 	if (wm_is_using_msix(sc)) {
   6307 		if (sc->sc_type != WM_T_82574) {
   6308 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6309 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6310 		} else
   6311 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6312 	}
   6313 
   6314 	/* Release any queued transmit buffers. */
   6315 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6316 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6317 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6318 		mutex_enter(txq->txq_lock);
   6319 		txq->txq_sending = false; /* ensure watchdog disabled */
   6320 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6321 			txs = &txq->txq_soft[i];
   6322 			if (txs->txs_mbuf != NULL) {
   6323 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6324 				m_freem(txs->txs_mbuf);
   6325 				txs->txs_mbuf = NULL;
   6326 			}
   6327 		}
   6328 		mutex_exit(txq->txq_lock);
   6329 	}
   6330 
   6331 	/* Mark the interface as down and cancel the watchdog timer. */
   6332 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6333 
   6334 	if (disable) {
   6335 		for (i = 0; i < sc->sc_nqueues; i++) {
   6336 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6337 			mutex_enter(rxq->rxq_lock);
   6338 			wm_rxdrain(rxq);
   6339 			mutex_exit(rxq->rxq_lock);
   6340 		}
   6341 	}
   6342 
   6343 #if 0 /* notyet */
   6344 	if (sc->sc_type >= WM_T_82544)
   6345 		CSR_WRITE(sc, WMREG_WUC, 0);
   6346 #endif
   6347 }
   6348 
   6349 static void
   6350 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6351 {
   6352 	struct mbuf *m;
   6353 	int i;
   6354 
   6355 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6356 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6357 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6358 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6359 		    m->m_data, m->m_len, m->m_flags);
   6360 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6361 	    i, i == 1 ? "" : "s");
   6362 }
   6363 
   6364 /*
   6365  * wm_82547_txfifo_stall:
   6366  *
   6367  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6368  *	reset the FIFO pointers, and restart packet transmission.
   6369  */
   6370 static void
   6371 wm_82547_txfifo_stall(void *arg)
   6372 {
   6373 	struct wm_softc *sc = arg;
   6374 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6375 
   6376 	mutex_enter(txq->txq_lock);
   6377 
   6378 	if (txq->txq_stopping)
   6379 		goto out;
   6380 
   6381 	if (txq->txq_fifo_stall) {
   6382 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6383 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6384 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6385 			/*
   6386 			 * Packets have drained.  Stop transmitter, reset
   6387 			 * FIFO pointers, restart transmitter, and kick
   6388 			 * the packet queue.
   6389 			 */
   6390 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6391 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6392 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6393 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6394 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6395 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6396 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6397 			CSR_WRITE_FLUSH(sc);
   6398 
   6399 			txq->txq_fifo_head = 0;
   6400 			txq->txq_fifo_stall = 0;
   6401 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6402 		} else {
   6403 			/*
   6404 			 * Still waiting for packets to drain; try again in
   6405 			 * another tick.
   6406 			 */
   6407 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6408 		}
   6409 	}
   6410 
   6411 out:
   6412 	mutex_exit(txq->txq_lock);
   6413 }
   6414 
   6415 /*
   6416  * wm_82547_txfifo_bugchk:
   6417  *
   6418  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6419  *	prevent enqueueing a packet that would wrap around the end
    6420  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6421  *
   6422  *	We do this by checking the amount of space before the end
   6423  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6424  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6425  *	the internal FIFO pointers to the beginning, and restart
   6426  *	transmission on the interface.
   6427  */
   6428 #define	WM_FIFO_HDR		0x10
   6429 #define	WM_82547_PAD_LEN	0x3e0
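         /*
          * Worked example (illustrative numbers): a 1514-byte frame is
          * rounded up to len = roundup(1514 + 0x10, 0x10) = 1536 bytes of
          * FIFO space.  With space = 0x400 (1024) bytes left before the
          * end of the FIFO, len (1536) < WM_82547_PAD_LEN + space (2016),
          * so the frame is sent and txq_fifo_head advances by 1536,
          * wrapping when it passes the end of the FIFO.
          */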
   6430 static int
   6431 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6432 {
   6433 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6434 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6435 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6436 
   6437 	/* Just return if already stalled. */
   6438 	if (txq->txq_fifo_stall)
   6439 		return 1;
   6440 
   6441 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6442 		/* Stall only occurs in half-duplex mode. */
   6443 		goto send_packet;
   6444 	}
   6445 
   6446 	if (len >= WM_82547_PAD_LEN + space) {
   6447 		txq->txq_fifo_stall = 1;
   6448 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6449 		return 1;
   6450 	}
   6451 
   6452  send_packet:
   6453 	txq->txq_fifo_head += len;
   6454 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6455 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6456 
   6457 	return 0;
   6458 }
   6459 
   6460 static int
   6461 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6462 {
   6463 	int error;
   6464 
   6465 	/*
   6466 	 * Allocate the control data structures, and create and load the
   6467 	 * DMA map for it.
   6468 	 *
   6469 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6470 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6471 	 * both sets within the same 4G segment.
   6472 	 */
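         	/*
         	 * The 4G constraint is enforced below by passing
         	 * 0x100000000ULL as the boundary argument to
         	 * bus_dmamem_alloc(), so an allocation never crosses
         	 * a 4G boundary.
         	 */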
   6473 	if (sc->sc_type < WM_T_82544)
   6474 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6475 	else
   6476 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6477 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6478 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6479 	else
   6480 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6481 
   6482 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6483 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6484 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6485 		aprint_error_dev(sc->sc_dev,
   6486 		    "unable to allocate TX control data, error = %d\n",
   6487 		    error);
   6488 		goto fail_0;
   6489 	}
   6490 
   6491 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6492 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6493 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6494 		aprint_error_dev(sc->sc_dev,
   6495 		    "unable to map TX control data, error = %d\n", error);
   6496 		goto fail_1;
   6497 	}
   6498 
   6499 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6500 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6501 		aprint_error_dev(sc->sc_dev,
   6502 		    "unable to create TX control data DMA map, error = %d\n",
   6503 		    error);
   6504 		goto fail_2;
   6505 	}
   6506 
   6507 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6508 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6509 		aprint_error_dev(sc->sc_dev,
   6510 		    "unable to load TX control data DMA map, error = %d\n",
   6511 		    error);
   6512 		goto fail_3;
   6513 	}
   6514 
   6515 	return 0;
   6516 
   6517  fail_3:
   6518 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6519  fail_2:
   6520 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6521 	    WM_TXDESCS_SIZE(txq));
   6522  fail_1:
   6523 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6524  fail_0:
   6525 	return error;
   6526 }
   6527 
   6528 static void
   6529 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6530 {
   6531 
   6532 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6533 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6534 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6535 	    WM_TXDESCS_SIZE(txq));
   6536 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6537 }
   6538 
   6539 static int
   6540 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6541 {
   6542 	int error;
   6543 	size_t rxq_descs_size;
   6544 
   6545 	/*
   6546 	 * Allocate the control data structures, and create and load the
   6547 	 * DMA map for it.
   6548 	 *
   6549 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6550 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6551 	 * both sets within the same 4G segment.
   6552 	 */
   6553 	rxq->rxq_ndesc = WM_NRXDESC;
   6554 	if (sc->sc_type == WM_T_82574)
   6555 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6556 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6557 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6558 	else
   6559 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6560 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6561 
   6562 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6563 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6564 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6565 		aprint_error_dev(sc->sc_dev,
   6566 		    "unable to allocate RX control data, error = %d\n",
   6567 		    error);
   6568 		goto fail_0;
   6569 	}
   6570 
   6571 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6572 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6573 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6574 		aprint_error_dev(sc->sc_dev,
   6575 		    "unable to map RX control data, error = %d\n", error);
   6576 		goto fail_1;
   6577 	}
   6578 
   6579 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6580 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6581 		aprint_error_dev(sc->sc_dev,
   6582 		    "unable to create RX control data DMA map, error = %d\n",
   6583 		    error);
   6584 		goto fail_2;
   6585 	}
   6586 
   6587 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6588 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6589 		aprint_error_dev(sc->sc_dev,
   6590 		    "unable to load RX control data DMA map, error = %d\n",
   6591 		    error);
   6592 		goto fail_3;
   6593 	}
   6594 
   6595 	return 0;
   6596 
   6597  fail_3:
   6598 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6599  fail_2:
   6600 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6601 	    rxq_descs_size);
   6602  fail_1:
   6603 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6604  fail_0:
   6605 	return error;
   6606 }
   6607 
   6608 static void
   6609 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6610 {
   6611 
   6612 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6613 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6614 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6615 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6616 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6617 }
   6618 
   6619 
   6620 static int
   6621 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6622 {
   6623 	int i, error;
   6624 
   6625 	/* Create the transmit buffer DMA maps. */
   6626 	WM_TXQUEUELEN(txq) =
   6627 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6628 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6629 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6630 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6631 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6632 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6633 			aprint_error_dev(sc->sc_dev,
   6634 			    "unable to create Tx DMA map %d, error = %d\n",
   6635 			    i, error);
   6636 			goto fail;
   6637 		}
   6638 	}
   6639 
   6640 	return 0;
   6641 
   6642  fail:
   6643 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6644 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6645 			bus_dmamap_destroy(sc->sc_dmat,
   6646 			    txq->txq_soft[i].txs_dmamap);
   6647 	}
   6648 	return error;
   6649 }
   6650 
   6651 static void
   6652 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6653 {
   6654 	int i;
   6655 
   6656 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6657 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6658 			bus_dmamap_destroy(sc->sc_dmat,
   6659 			    txq->txq_soft[i].txs_dmamap);
   6660 	}
   6661 }
   6662 
   6663 static int
   6664 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6665 {
   6666 	int i, error;
   6667 
   6668 	/* Create the receive buffer DMA maps. */
   6669 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6670 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6671 			    MCLBYTES, 0, 0,
   6672 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6673 			aprint_error_dev(sc->sc_dev,
   6674 			    "unable to create Rx DMA map %d error = %d\n",
   6675 			    i, error);
   6676 			goto fail;
   6677 		}
   6678 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6679 	}
   6680 
   6681 	return 0;
   6682 
   6683  fail:
   6684 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6685 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6686 			bus_dmamap_destroy(sc->sc_dmat,
   6687 			    rxq->rxq_soft[i].rxs_dmamap);
   6688 	}
   6689 	return error;
   6690 }
   6691 
   6692 static void
   6693 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6694 {
   6695 	int i;
   6696 
   6697 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6698 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6699 			bus_dmamap_destroy(sc->sc_dmat,
   6700 			    rxq->rxq_soft[i].rxs_dmamap);
   6701 	}
   6702 }
   6703 
   6704 /*
    6705  * wm_alloc_txrx_queues:
   6706  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6707  */
   6708 static int
   6709 wm_alloc_txrx_queues(struct wm_softc *sc)
   6710 {
   6711 	int i, error, tx_done, rx_done;
   6712 
   6713 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6714 	    KM_SLEEP);
   6715 	if (sc->sc_queue == NULL) {
    6716 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6717 		error = ENOMEM;
   6718 		goto fail_0;
   6719 	}
   6720 
   6721 	/*
   6722 	 * For transmission
   6723 	 */
   6724 	error = 0;
   6725 	tx_done = 0;
   6726 	for (i = 0; i < sc->sc_nqueues; i++) {
   6727 #ifdef WM_EVENT_COUNTERS
   6728 		int j;
   6729 		const char *xname;
   6730 #endif
   6731 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6732 		txq->txq_sc = sc;
   6733 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6734 
   6735 		error = wm_alloc_tx_descs(sc, txq);
   6736 		if (error)
   6737 			break;
   6738 		error = wm_alloc_tx_buffer(sc, txq);
   6739 		if (error) {
   6740 			wm_free_tx_descs(sc, txq);
   6741 			break;
   6742 		}
   6743 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6744 		if (txq->txq_interq == NULL) {
   6745 			wm_free_tx_descs(sc, txq);
   6746 			wm_free_tx_buffer(sc, txq);
   6747 			error = ENOMEM;
   6748 			break;
   6749 		}
   6750 
   6751 #ifdef WM_EVENT_COUNTERS
   6752 		xname = device_xname(sc->sc_dev);
   6753 
   6754 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6755 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6756 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6757 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6758 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6759 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6760 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6761 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6762 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6763 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6764 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6765 
   6766 		for (j = 0; j < WM_NTXSEGS; j++) {
   6767 			snprintf(txq->txq_txseg_evcnt_names[j],
   6768 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6769 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6770 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6771 		}
   6772 
   6773 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6774 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6775 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6776 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6777 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6778 #endif /* WM_EVENT_COUNTERS */
   6779 
   6780 		tx_done++;
   6781 	}
   6782 	if (error)
   6783 		goto fail_1;
   6784 
   6785 	/*
    6786 	 * For receive
   6787 	 */
   6788 	error = 0;
   6789 	rx_done = 0;
   6790 	for (i = 0; i < sc->sc_nqueues; i++) {
   6791 #ifdef WM_EVENT_COUNTERS
   6792 		const char *xname;
   6793 #endif
   6794 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6795 		rxq->rxq_sc = sc;
   6796 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6797 
   6798 		error = wm_alloc_rx_descs(sc, rxq);
   6799 		if (error)
   6800 			break;
   6801 
   6802 		error = wm_alloc_rx_buffer(sc, rxq);
   6803 		if (error) {
   6804 			wm_free_rx_descs(sc, rxq);
   6805 			break;
   6806 		}
   6807 
   6808 #ifdef WM_EVENT_COUNTERS
   6809 		xname = device_xname(sc->sc_dev);
   6810 
   6811 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6812 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6813 
   6814 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6815 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6816 #endif /* WM_EVENT_COUNTERS */
   6817 
   6818 		rx_done++;
   6819 	}
   6820 	if (error)
   6821 		goto fail_2;
   6822 
   6823 	return 0;
   6824 
   6825  fail_2:
   6826 	for (i = 0; i < rx_done; i++) {
   6827 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6828 		wm_free_rx_buffer(sc, rxq);
   6829 		wm_free_rx_descs(sc, rxq);
   6830 		if (rxq->rxq_lock)
   6831 			mutex_obj_free(rxq->rxq_lock);
   6832 	}
   6833  fail_1:
   6834 	for (i = 0; i < tx_done; i++) {
   6835 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6836 		pcq_destroy(txq->txq_interq);
   6837 		wm_free_tx_buffer(sc, txq);
   6838 		wm_free_tx_descs(sc, txq);
   6839 		if (txq->txq_lock)
   6840 			mutex_obj_free(txq->txq_lock);
   6841 	}
   6842 
   6843 	kmem_free(sc->sc_queue,
   6844 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6845  fail_0:
   6846 	return error;
   6847 }
   6848 
   6849 /*
    6850  * wm_free_txrx_queues:
   6851  *	Free {tx,rx}descs and {tx,rx} buffers
   6852  */
   6853 static void
   6854 wm_free_txrx_queues(struct wm_softc *sc)
   6855 {
   6856 	int i;
   6857 
   6858 	for (i = 0; i < sc->sc_nqueues; i++) {
   6859 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6860 
   6861 #ifdef WM_EVENT_COUNTERS
   6862 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6863 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6864 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6865 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6866 #endif /* WM_EVENT_COUNTERS */
   6867 
   6868 		wm_free_rx_buffer(sc, rxq);
   6869 		wm_free_rx_descs(sc, rxq);
   6870 		if (rxq->rxq_lock)
   6871 			mutex_obj_free(rxq->rxq_lock);
   6872 	}
   6873 
   6874 	for (i = 0; i < sc->sc_nqueues; i++) {
   6875 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6876 		struct mbuf *m;
   6877 #ifdef WM_EVENT_COUNTERS
   6878 		int j;
   6879 
   6880 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6881 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6882 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6883 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6884 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6885 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6886 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6887 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6888 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6889 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6890 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6891 
   6892 		for (j = 0; j < WM_NTXSEGS; j++)
   6893 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6894 
   6895 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6896 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6897 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6898 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6899 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6900 #endif /* WM_EVENT_COUNTERS */
   6901 
   6902 		/* drain txq_interq */
   6903 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6904 			m_freem(m);
   6905 		pcq_destroy(txq->txq_interq);
   6906 
   6907 		wm_free_tx_buffer(sc, txq);
   6908 		wm_free_tx_descs(sc, txq);
   6909 		if (txq->txq_lock)
   6910 			mutex_obj_free(txq->txq_lock);
   6911 	}
   6912 
   6913 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6914 }
   6915 
   6916 static void
   6917 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6918 {
   6919 
   6920 	KASSERT(mutex_owned(txq->txq_lock));
   6921 
   6922 	/* Initialize the transmit descriptor ring. */
   6923 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6924 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6925 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6926 	txq->txq_free = WM_NTXDESC(txq);
   6927 	txq->txq_next = 0;
   6928 }
   6929 
   6930 static void
   6931 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6932     struct wm_txqueue *txq)
   6933 {
   6934 
   6935 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6936 		device_xname(sc->sc_dev), __func__));
   6937 	KASSERT(mutex_owned(txq->txq_lock));
   6938 
   6939 	if (sc->sc_type < WM_T_82543) {
   6940 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6941 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6942 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6943 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6944 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6945 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6946 	} else {
   6947 		int qid = wmq->wmq_id;
   6948 
   6949 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6950 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6951 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6952 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6953 
   6954 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6955 			/*
   6956 			 * Don't write TDT before TCTL.EN is set.
    6957 			 * See the datasheet.
   6958 			 */
   6959 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6960 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6961 			    | TXDCTL_WTHRESH(0));
   6962 		else {
   6963 			/* XXX should update with AIM? */
   6964 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6965 			if (sc->sc_type >= WM_T_82540) {
   6966 				/* should be same */
   6967 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6968 			}
   6969 
   6970 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6971 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6972 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6973 		}
   6974 	}
   6975 }
   6976 
   6977 static void
   6978 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6979 {
   6980 	int i;
   6981 
   6982 	KASSERT(mutex_owned(txq->txq_lock));
   6983 
   6984 	/* Initialize the transmit job descriptors. */
   6985 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6986 		txq->txq_soft[i].txs_mbuf = NULL;
   6987 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6988 	txq->txq_snext = 0;
   6989 	txq->txq_sdirty = 0;
   6990 }
   6991 
   6992 static void
   6993 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6994     struct wm_txqueue *txq)
   6995 {
   6996 
   6997 	KASSERT(mutex_owned(txq->txq_lock));
   6998 
   6999 	/*
   7000 	 * Set up some register offsets that are different between
   7001 	 * the i82542 and the i82543 and later chips.
   7002 	 */
   7003 	if (sc->sc_type < WM_T_82543)
   7004 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7005 	else
   7006 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7007 
   7008 	wm_init_tx_descs(sc, txq);
   7009 	wm_init_tx_regs(sc, wmq, txq);
   7010 	wm_init_tx_buffer(sc, txq);
   7011 
   7012 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7013 	txq->txq_sending = false;
   7014 }
   7015 
   7016 static void
   7017 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7018     struct wm_rxqueue *rxq)
   7019 {
   7020 
   7021 	KASSERT(mutex_owned(rxq->rxq_lock));
   7022 
   7023 	/*
   7024 	 * Initialize the receive descriptor and receive job
   7025 	 * descriptor rings.
   7026 	 */
   7027 	if (sc->sc_type < WM_T_82543) {
   7028 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7029 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7030 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7031 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7032 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7033 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7034 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7035 
   7036 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7037 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7038 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7039 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7040 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7041 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7042 	} else {
   7043 		int qid = wmq->wmq_id;
   7044 
   7045 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7046 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7047 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7048 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7049 
   7050 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7051 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    7052 				panic("%s: MCLBYTES %d unsupported for 82575 "
         				    "or higher\n", __func__, MCLBYTES);
   7053 
    7054 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7055 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7056 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
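         			/*
         			 * Illustrative, assuming the usual 1KB packet
         			 * buffer granularity (SRRCTL_BSIZEPKT_SHIFT of
         			 * 10): MCLBYTES = 2048 programs a BSIZEPKT
         			 * field of 2, i.e. 2KB receive buffers.
         			 */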
   7057 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7058 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7059 			    | RXDCTL_WTHRESH(1));
   7060 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7061 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7062 		} else {
   7063 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7064 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7065 			/* XXX should update with AIM? */
   7066 			CSR_WRITE(sc, WMREG_RDTR,
   7067 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7068 			/* MUST be same */
   7069 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7070 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7071 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7072 		}
   7073 	}
   7074 }
   7075 
   7076 static int
   7077 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7078 {
   7079 	struct wm_rxsoft *rxs;
   7080 	int error, i;
   7081 
   7082 	KASSERT(mutex_owned(rxq->rxq_lock));
   7083 
   7084 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7085 		rxs = &rxq->rxq_soft[i];
   7086 		if (rxs->rxs_mbuf == NULL) {
   7087 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7088 				log(LOG_ERR, "%s: unable to allocate or map "
   7089 				    "rx buffer %d, error = %d\n",
   7090 				    device_xname(sc->sc_dev), i, error);
   7091 				/*
   7092 				 * XXX Should attempt to run with fewer receive
   7093 				 * XXX buffers instead of just failing.
   7094 				 */
   7095 				wm_rxdrain(rxq);
   7096 				return ENOMEM;
   7097 			}
   7098 		} else {
   7099 			/*
   7100 			 * For 82575 and 82576, the RX descriptors must be
   7101 			 * initialized after the setting of RCTL.EN in
   7102 			 * wm_set_filter()
   7103 			 */
   7104 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7105 				wm_init_rxdesc(rxq, i);
   7106 		}
   7107 	}
   7108 	rxq->rxq_ptr = 0;
   7109 	rxq->rxq_discard = 0;
   7110 	WM_RXCHAIN_RESET(rxq);
   7111 
   7112 	return 0;
   7113 }
   7114 
   7115 static int
   7116 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7117     struct wm_rxqueue *rxq)
   7118 {
   7119 
   7120 	KASSERT(mutex_owned(rxq->rxq_lock));
   7121 
   7122 	/*
   7123 	 * Set up some register offsets that are different between
   7124 	 * the i82542 and the i82543 and later chips.
   7125 	 */
   7126 	if (sc->sc_type < WM_T_82543)
   7127 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7128 	else
   7129 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7130 
   7131 	wm_init_rx_regs(sc, wmq, rxq);
   7132 	return wm_init_rx_buffer(sc, rxq);
   7133 }
   7134 
   7135 /*
    7136  * wm_init_txrx_queues:
   7137  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7138  */
   7139 static int
   7140 wm_init_txrx_queues(struct wm_softc *sc)
   7141 {
   7142 	int i, error = 0;
   7143 
   7144 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7145 		device_xname(sc->sc_dev), __func__));
   7146 
   7147 	for (i = 0; i < sc->sc_nqueues; i++) {
   7148 		struct wm_queue *wmq = &sc->sc_queue[i];
   7149 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7150 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7151 
   7152 		/*
   7153 		 * TODO
    7154 		 * Currently, a constant value is used instead of AIM
    7155 		 * (Adaptive Interrupt Moderation). Also, the interrupt
    7156 		 * interval of multiqueue with polling mode is less than
    7157 		 * the default value. More tuning and AIM are required.
   7158 		 */
   7159 		if (wm_is_using_multiqueue(sc))
   7160 			wmq->wmq_itr = 50;
   7161 		else
   7162 			wmq->wmq_itr = sc->sc_itr_init;
   7163 		wmq->wmq_set_itr = true;
   7164 
   7165 		mutex_enter(txq->txq_lock);
   7166 		wm_init_tx_queue(sc, wmq, txq);
   7167 		mutex_exit(txq->txq_lock);
   7168 
   7169 		mutex_enter(rxq->rxq_lock);
   7170 		error = wm_init_rx_queue(sc, wmq, rxq);
   7171 		mutex_exit(rxq->rxq_lock);
   7172 		if (error)
   7173 			break;
   7174 	}
   7175 
   7176 	return error;
   7177 }
   7178 
   7179 /*
   7180  * wm_tx_offload:
   7181  *
   7182  *	Set up TCP/IP checksumming parameters for the
   7183  *	specified packet.
   7184  */
   7185 static int
   7186 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7187     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7188 {
   7189 	struct mbuf *m0 = txs->txs_mbuf;
   7190 	struct livengood_tcpip_ctxdesc *t;
   7191 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7192 	uint32_t ipcse;
   7193 	struct ether_header *eh;
   7194 	int offset, iphl;
   7195 	uint8_t fields;
   7196 
   7197 	/*
   7198 	 * XXX It would be nice if the mbuf pkthdr had offset
   7199 	 * fields for the protocol headers.
   7200 	 */
   7201 
   7202 	eh = mtod(m0, struct ether_header *);
   7203 	switch (htons(eh->ether_type)) {
   7204 	case ETHERTYPE_IP:
   7205 	case ETHERTYPE_IPV6:
   7206 		offset = ETHER_HDR_LEN;
   7207 		break;
   7208 
   7209 	case ETHERTYPE_VLAN:
   7210 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7211 		break;
   7212 
   7213 	default:
   7214 		/*
   7215 		 * Don't support this protocol or encapsulation.
   7216 		 */
   7217 		*fieldsp = 0;
   7218 		*cmdp = 0;
   7219 		return 0;
   7220 	}
   7221 
   7222 	if ((m0->m_pkthdr.csum_flags &
   7223 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7224 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7225 	} else
   7226 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7227 
   7228 	ipcse = offset + iphl - 1;
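         	/*
         	 * E.g. for plain IPv4 over Ethernet: offset = 14
         	 * (ETHER_HDR_LEN) and iphl = 20 give ipcse = 33, the offset
         	 * of the last byte covered by the IP checksum.
         	 */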
   7229 
   7230 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7231 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7232 	seg = 0;
   7233 	fields = 0;
   7234 
   7235 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7236 		int hlen = offset + iphl;
   7237 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7238 
   7239 		if (__predict_false(m0->m_len <
   7240 				    (hlen + sizeof(struct tcphdr)))) {
   7241 			/*
   7242 			 * TCP/IP headers are not in the first mbuf; we need
   7243 			 * to do this the slow and painful way. Let's just
   7244 			 * hope this doesn't happen very often.
   7245 			 */
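         			/*
         			 * Below, the IP length fields are zeroed and
         			 * th_sum is seeded with the pseudo-header
         			 * checksum only; the hardware fills in the
         			 * per-segment lengths and checksums while it
         			 * performs TSO.
         			 */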
   7246 			struct tcphdr th;
   7247 
   7248 			WM_Q_EVCNT_INCR(txq, tsopain);
   7249 
   7250 			m_copydata(m0, hlen, sizeof(th), &th);
   7251 			if (v4) {
   7252 				struct ip ip;
   7253 
   7254 				m_copydata(m0, offset, sizeof(ip), &ip);
   7255 				ip.ip_len = 0;
   7256 				m_copyback(m0,
   7257 				    offset + offsetof(struct ip, ip_len),
   7258 				    sizeof(ip.ip_len), &ip.ip_len);
   7259 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7260 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7261 			} else {
   7262 				struct ip6_hdr ip6;
   7263 
   7264 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7265 				ip6.ip6_plen = 0;
   7266 				m_copyback(m0,
   7267 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7268 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7269 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7270 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7271 			}
   7272 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7273 			    sizeof(th.th_sum), &th.th_sum);
   7274 
   7275 			hlen += th.th_off << 2;
   7276 		} else {
   7277 			/*
   7278 			 * TCP/IP headers are in the first mbuf; we can do
   7279 			 * this the easy way.
   7280 			 */
   7281 			struct tcphdr *th;
   7282 
   7283 			if (v4) {
   7284 				struct ip *ip =
   7285 				    (void *)(mtod(m0, char *) + offset);
   7286 				th = (void *)(mtod(m0, char *) + hlen);
   7287 
   7288 				ip->ip_len = 0;
   7289 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7290 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7291 			} else {
   7292 				struct ip6_hdr *ip6 =
   7293 				    (void *)(mtod(m0, char *) + offset);
   7294 				th = (void *)(mtod(m0, char *) + hlen);
   7295 
   7296 				ip6->ip6_plen = 0;
   7297 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7298 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7299 			}
   7300 			hlen += th->th_off << 2;
   7301 		}
   7302 
   7303 		if (v4) {
   7304 			WM_Q_EVCNT_INCR(txq, tso);
   7305 			cmdlen |= WTX_TCPIP_CMD_IP;
   7306 		} else {
   7307 			WM_Q_EVCNT_INCR(txq, tso6);
   7308 			ipcse = 0;
   7309 		}
   7310 		cmd |= WTX_TCPIP_CMD_TSE;
   7311 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7312 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7313 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7314 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7315 	}
   7316 
   7317 	/*
   7318 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7319 	 * offload feature, if we load the context descriptor, we
   7320 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7321 	 */
   7322 
   7323 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7324 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7325 	    WTX_TCPIP_IPCSE(ipcse);
   7326 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7327 		WM_Q_EVCNT_INCR(txq, ipsum);
   7328 		fields |= WTX_IXSM;
   7329 	}
   7330 
   7331 	offset += iphl;
   7332 
   7333 	if (m0->m_pkthdr.csum_flags &
   7334 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7335 		WM_Q_EVCNT_INCR(txq, tusum);
   7336 		fields |= WTX_TXSM;
   7337 		tucs = WTX_TCPIP_TUCSS(offset) |
   7338 		    WTX_TCPIP_TUCSO(offset +
   7339 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7340 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7341 	} else if ((m0->m_pkthdr.csum_flags &
   7342 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7343 		WM_Q_EVCNT_INCR(txq, tusum6);
   7344 		fields |= WTX_TXSM;
   7345 		tucs = WTX_TCPIP_TUCSS(offset) |
   7346 		    WTX_TCPIP_TUCSO(offset +
   7347 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7348 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7349 	} else {
   7350 		/* Just initialize it to a valid TCP context. */
   7351 		tucs = WTX_TCPIP_TUCSS(offset) |
   7352 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7353 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7354 	}
   7355 
   7356 	/*
    7357 	 * We don't have to write a context descriptor for every packet,
    7358 	 * except on the 82574: the 82574 requires a context descriptor
    7359 	 * for every packet when two descriptor queues are used.
    7360 	 * Writing a context descriptor for every packet adds overhead,
    7361 	 * but it does not cause problems.
   7362 	 */
   7363 	/* Fill in the context descriptor. */
   7364 	t = (struct livengood_tcpip_ctxdesc *)
   7365 	    &txq->txq_descs[txq->txq_next];
   7366 	t->tcpip_ipcs = htole32(ipcs);
   7367 	t->tcpip_tucs = htole32(tucs);
   7368 	t->tcpip_cmdlen = htole32(cmdlen);
   7369 	t->tcpip_seg = htole32(seg);
   7370 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7371 
   7372 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7373 	txs->txs_ndesc++;
   7374 
   7375 	*cmdp = cmd;
   7376 	*fieldsp = fields;
   7377 
   7378 	return 0;
   7379 }
   7380 
   7381 static inline int
   7382 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7383 {
   7384 	struct wm_softc *sc = ifp->if_softc;
   7385 	u_int cpuid = cpu_index(curcpu());
   7386 
   7387 	/*
    7388 	 * Currently, a simple distribution strategy.
    7389 	 * TODO:
    7390 	 * Distribute by flowid (RSS hash value).
   7391 	 */
   7392 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7393 }
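         /*
          * Example of the mapping above (illustrative numbers): with
          * ncpu = 8, sc_nqueues = 4 and sc_affinity_offset = 0, a sender
          * running on CPU 5 is assigned ((5 + 8 - 0) % 8) % 4 = queue 1.
          */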
   7394 
   7395 /*
   7396  * wm_start:		[ifnet interface function]
   7397  *
   7398  *	Start packet transmission on the interface.
   7399  */
   7400 static void
   7401 wm_start(struct ifnet *ifp)
   7402 {
   7403 	struct wm_softc *sc = ifp->if_softc;
   7404 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7405 
   7406 #ifdef WM_MPSAFE
   7407 	KASSERT(if_is_mpsafe(ifp));
   7408 #endif
   7409 	/*
   7410 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7411 	 */
   7412 
   7413 	mutex_enter(txq->txq_lock);
   7414 	if (!txq->txq_stopping)
   7415 		wm_start_locked(ifp);
   7416 	mutex_exit(txq->txq_lock);
   7417 }
   7418 
   7419 static void
   7420 wm_start_locked(struct ifnet *ifp)
   7421 {
   7422 	struct wm_softc *sc = ifp->if_softc;
   7423 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7424 
   7425 	wm_send_common_locked(ifp, txq, false);
   7426 }
   7427 
   7428 static int
   7429 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7430 {
   7431 	int qid;
   7432 	struct wm_softc *sc = ifp->if_softc;
   7433 	struct wm_txqueue *txq;
   7434 
   7435 	qid = wm_select_txqueue(ifp, m);
   7436 	txq = &sc->sc_queue[qid].wmq_txq;
   7437 
   7438 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7439 		m_freem(m);
   7440 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7441 		return ENOBUFS;
   7442 	}
   7443 
   7444 	/*
   7445 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7446 	 */
   7447 	ifp->if_obytes += m->m_pkthdr.len;
   7448 	if (m->m_flags & M_MCAST)
   7449 		ifp->if_omcasts++;
   7450 
   7451 	if (mutex_tryenter(txq->txq_lock)) {
   7452 		if (!txq->txq_stopping)
   7453 			wm_transmit_locked(ifp, txq);
   7454 		mutex_exit(txq->txq_lock);
   7455 	}
   7456 
   7457 	return 0;
   7458 }
   7459 
   7460 static void
   7461 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7462 {
   7463 
   7464 	wm_send_common_locked(ifp, txq, true);
   7465 }
   7466 
   7467 static void
   7468 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7469     bool is_transmit)
   7470 {
   7471 	struct wm_softc *sc = ifp->if_softc;
   7472 	struct mbuf *m0;
   7473 	struct wm_txsoft *txs;
   7474 	bus_dmamap_t dmamap;
   7475 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7476 	bus_addr_t curaddr;
   7477 	bus_size_t seglen, curlen;
   7478 	uint32_t cksumcmd;
   7479 	uint8_t cksumfields;
   7480 	bool remap = true;
   7481 
   7482 	KASSERT(mutex_owned(txq->txq_lock));
   7483 
   7484 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7485 		return;
   7486 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7487 		return;
   7488 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7489 		return;
   7490 
   7491 	/* Remember the previous number of free descriptors. */
   7492 	ofree = txq->txq_free;
   7493 
   7494 	/*
   7495 	 * Loop through the send queue, setting up transmit descriptors
   7496 	 * until we drain the queue, or use up all available transmit
   7497 	 * descriptors.
   7498 	 */
   7499 	for (;;) {
   7500 		m0 = NULL;
   7501 
   7502 		/* Get a work queue entry. */
   7503 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7504 			wm_txeof(txq, UINT_MAX);
   7505 			if (txq->txq_sfree == 0) {
   7506 				DPRINTF(WM_DEBUG_TX,
   7507 				    ("%s: TX: no free job descriptors\n",
   7508 					device_xname(sc->sc_dev)));
   7509 				WM_Q_EVCNT_INCR(txq, txsstall);
   7510 				break;
   7511 			}
   7512 		}
   7513 
   7514 		/* Grab a packet off the queue. */
   7515 		if (is_transmit)
   7516 			m0 = pcq_get(txq->txq_interq);
   7517 		else
   7518 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7519 		if (m0 == NULL)
   7520 			break;
   7521 
   7522 		DPRINTF(WM_DEBUG_TX,
   7523 		    ("%s: TX: have packet to transmit: %p\n",
   7524 			device_xname(sc->sc_dev), m0));
   7525 
   7526 		txs = &txq->txq_soft[txq->txq_snext];
   7527 		dmamap = txs->txs_dmamap;
   7528 
   7529 		use_tso = (m0->m_pkthdr.csum_flags &
   7530 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7531 
   7532 		/*
   7533 		 * So says the Linux driver:
   7534 		 * The controller does a simple calculation to make sure
   7535 		 * there is enough room in the FIFO before initiating the
   7536 		 * DMA for each buffer. The calc is:
   7537 		 *	4 = ceil(buffer len / MSS)
   7538 		 * To make sure we don't overrun the FIFO, adjust the max
   7539 		 * buffer len if the MSS drops.
   7540 		 */
   7541 		dmamap->dm_maxsegsz =
   7542 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7543 		    ? m0->m_pkthdr.segsz << 2
   7544 		    : WTX_MAX_LEN;
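         		/*
         		 * E.g. (illustrative) an MSS of 1460 caps each DMA
         		 * segment at 1460 << 2 = 5840 bytes, provided that is
         		 * below WTX_MAX_LEN, matching the 4 * MSS FIFO rule
         		 * described above.
         		 */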
   7545 
   7546 		/*
   7547 		 * Load the DMA map.  If this fails, the packet either
   7548 		 * didn't fit in the allotted number of segments, or we
   7549 		 * were short on resources.  For the too-many-segments
   7550 		 * case, we simply report an error and drop the packet,
   7551 		 * since we can't sanely copy a jumbo packet to a single
   7552 		 * buffer.
   7553 		 */
   7554 retry:
   7555 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7556 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7557 		if (__predict_false(error)) {
   7558 			if (error == EFBIG) {
   7559 				if (remap == true) {
   7560 					struct mbuf *m;
   7561 
   7562 					remap = false;
   7563 					m = m_defrag(m0, M_NOWAIT);
   7564 					if (m != NULL) {
   7565 						WM_Q_EVCNT_INCR(txq, defrag);
   7566 						m0 = m;
   7567 						goto retry;
   7568 					}
   7569 				}
   7570 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7571 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7572 				    "DMA segments, dropping...\n",
   7573 				    device_xname(sc->sc_dev));
   7574 				wm_dump_mbuf_chain(sc, m0);
   7575 				m_freem(m0);
   7576 				continue;
   7577 			}
   7578 			/*  Short on resources, just stop for now. */
   7579 			DPRINTF(WM_DEBUG_TX,
   7580 			    ("%s: TX: dmamap load failed: %d\n",
   7581 				device_xname(sc->sc_dev), error));
   7582 			break;
   7583 		}
   7584 
   7585 		segs_needed = dmamap->dm_nsegs;
   7586 		if (use_tso) {
   7587 			/* For sentinel descriptor; see below. */
   7588 			segs_needed++;
   7589 		}
   7590 
   7591 		/*
   7592 		 * Ensure we have enough descriptors free to describe
   7593 		 * the packet. Note, we always reserve one descriptor
   7594 		 * at the end of the ring due to the semantics of the
   7595 		 * TDT register, plus one more in the event we need
   7596 		 * to load offload context.
   7597 		 */
   7598 		if (segs_needed > txq->txq_free - 2) {
   7599 			/*
   7600 			 * Not enough free descriptors to transmit this
   7601 			 * packet.  We haven't committed anything yet,
   7602 			 * so just unload the DMA map, put the packet
    7603 			 * back on the queue, and punt. Notify the upper
   7604 			 * layer that there are no more slots left.
   7605 			 */
   7606 			DPRINTF(WM_DEBUG_TX,
   7607 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7608 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7609 				segs_needed, txq->txq_free - 1));
   7610 			if (!is_transmit)
   7611 				ifp->if_flags |= IFF_OACTIVE;
   7612 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7613 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7614 			WM_Q_EVCNT_INCR(txq, txdstall);
   7615 			break;
   7616 		}
   7617 
   7618 		/*
   7619 		 * Check for 82547 Tx FIFO bug. We need to do this
   7620 		 * once we know we can transmit the packet, since we
   7621 		 * do some internal FIFO space accounting here.
   7622 		 */
   7623 		if (sc->sc_type == WM_T_82547 &&
   7624 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7625 			DPRINTF(WM_DEBUG_TX,
   7626 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7627 				device_xname(sc->sc_dev)));
   7628 			if (!is_transmit)
   7629 				ifp->if_flags |= IFF_OACTIVE;
   7630 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7631 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7632 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7633 			break;
   7634 		}
   7635 
   7636 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7637 
   7638 		DPRINTF(WM_DEBUG_TX,
   7639 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7640 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7641 
   7642 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7643 
   7644 		/*
   7645 		 * Store a pointer to the packet so that we can free it
   7646 		 * later.
   7647 		 *
   7648 		 * Initially, we consider the number of descriptors the
    7649 		 * packet uses to be the number of DMA segments.  This may be
   7650 		 * incremented by 1 if we do checksum offload (a descriptor
   7651 		 * is used to set the checksum context).
   7652 		 */
   7653 		txs->txs_mbuf = m0;
   7654 		txs->txs_firstdesc = txq->txq_next;
   7655 		txs->txs_ndesc = segs_needed;
   7656 
   7657 		/* Set up offload parameters for this packet. */
   7658 		if (m0->m_pkthdr.csum_flags &
   7659 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7660 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7661 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7662 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7663 					  &cksumfields) != 0) {
   7664 				/* Error message already displayed. */
   7665 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7666 				continue;
   7667 			}
   7668 		} else {
   7669 			cksumcmd = 0;
   7670 			cksumfields = 0;
   7671 		}
   7672 
   7673 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7674 
   7675 		/* Sync the DMA map. */
   7676 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7677 		    BUS_DMASYNC_PREWRITE);
   7678 
   7679 		/* Initialize the transmit descriptor. */
   7680 		for (nexttx = txq->txq_next, seg = 0;
   7681 		     seg < dmamap->dm_nsegs; seg++) {
   7682 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7683 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7684 			     seglen != 0;
   7685 			     curaddr += curlen, seglen -= curlen,
   7686 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7687 				curlen = seglen;
   7688 
   7689 				/*
   7690 				 * So says the Linux driver:
   7691 				 * Work around for premature descriptor
   7692 				 * write-backs in TSO mode.  Append a
   7693 				 * 4-byte sentinel descriptor.
   7694 				 */
   7695 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7696 				    curlen > 8)
   7697 					curlen -= 4;
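         				/*
         				 * The 4 bytes trimmed above remain in
         				 * seglen, so the loop emits one extra
         				 * 4-byte descriptor: the sentinel
         				 * accounted for in segs_needed.
         				 */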
   7698 
   7699 				wm_set_dma_addr(
   7700 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7701 				txq->txq_descs[nexttx].wtx_cmdlen
   7702 				    = htole32(cksumcmd | curlen);
   7703 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7704 				    = 0;
   7705 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7706 				    = cksumfields;
   7707 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7708 				lasttx = nexttx;
   7709 
   7710 				DPRINTF(WM_DEBUG_TX,
   7711 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7712 					"len %#04zx\n",
   7713 					device_xname(sc->sc_dev), nexttx,
   7714 					(uint64_t)curaddr, curlen));
   7715 			}
   7716 		}
   7717 
   7718 		KASSERT(lasttx != -1);
   7719 
   7720 		/*
   7721 		 * Set up the command byte on the last descriptor of
   7722 		 * the packet. If we're in the interrupt delay window,
   7723 		 * delay the interrupt.
   7724 		 */
   7725 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7726 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7727 
   7728 		/*
   7729 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7730 		 * up the descriptor to encapsulate the packet for us.
   7731 		 *
   7732 		 * This is only valid on the last descriptor of the packet.
   7733 		 */
   7734 		if (vlan_has_tag(m0)) {
   7735 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7736 			    htole32(WTX_CMD_VLE);
   7737 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7738 			    = htole16(vlan_get_tag(m0));
   7739 		}
   7740 
   7741 		txs->txs_lastdesc = lasttx;
   7742 
   7743 		DPRINTF(WM_DEBUG_TX,
   7744 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7745 			device_xname(sc->sc_dev),
   7746 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7747 
   7748 		/* Sync the descriptors we're using. */
   7749 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7750 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7751 
   7752 		/* Give the packet to the chip. */
   7753 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7754 
   7755 		DPRINTF(WM_DEBUG_TX,
   7756 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7757 
   7758 		DPRINTF(WM_DEBUG_TX,
   7759 		    ("%s: TX: finished transmitting packet, job %d\n",
   7760 			device_xname(sc->sc_dev), txq->txq_snext));
   7761 
   7762 		/* Advance the tx pointer. */
   7763 		txq->txq_free -= txs->txs_ndesc;
   7764 		txq->txq_next = nexttx;
   7765 
   7766 		txq->txq_sfree--;
   7767 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7768 
   7769 		/* Pass the packet to any BPF listeners. */
   7770 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7771 	}
   7772 
   7773 	if (m0 != NULL) {
   7774 		if (!is_transmit)
   7775 			ifp->if_flags |= IFF_OACTIVE;
   7776 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7777 		WM_Q_EVCNT_INCR(txq, descdrop);
   7778 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7779 			__func__));
   7780 		m_freem(m0);
   7781 	}
   7782 
   7783 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7784 		/* No more slots; notify upper layer. */
   7785 		if (!is_transmit)
   7786 			ifp->if_flags |= IFF_OACTIVE;
   7787 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7788 	}
   7789 
   7790 	if (txq->txq_free != ofree) {
   7791 		/* Set a watchdog timer in case the chip flakes out. */
   7792 		txq->txq_lastsent = time_uptime;
   7793 		txq->txq_sending = true;
   7794 	}
   7795 }
   7796 
   7797 /*
   7798  * wm_nq_tx_offload:
   7799  *
   7800  *	Set up TCP/IP checksumming parameters for the
   7801  *	specified packet, for NEWQUEUE devices
   7802  */
   7803 static int
   7804 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7805     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7806 {
   7807 	struct mbuf *m0 = txs->txs_mbuf;
   7808 	uint32_t vl_len, mssidx, cmdc;
   7809 	struct ether_header *eh;
   7810 	int offset, iphl;
   7811 
   7812 	/*
   7813 	 * XXX It would be nice if the mbuf pkthdr had offset
   7814 	 * fields for the protocol headers.
   7815 	 */
   7816 	*cmdlenp = 0;
   7817 	*fieldsp = 0;
   7818 
   7819 	eh = mtod(m0, struct ether_header *);
   7820 	switch (htons(eh->ether_type)) {
   7821 	case ETHERTYPE_IP:
   7822 	case ETHERTYPE_IPV6:
   7823 		offset = ETHER_HDR_LEN;
   7824 		break;
   7825 
   7826 	case ETHERTYPE_VLAN:
   7827 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7828 		break;
   7829 
   7830 	default:
   7831 		/* Don't support this protocol or encapsulation. */
   7832 		*do_csum = false;
   7833 		return 0;
   7834 	}
   7835 	*do_csum = true;
   7836 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7837 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7838 
   7839 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7840 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7841 
   7842 	if ((m0->m_pkthdr.csum_flags &
   7843 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7844 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7845 	} else {
   7846 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7847 	}
   7848 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7849 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
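         	/*
         	 * Illustrative layout: for plain IPv4 over Ethernet, vl_len
         	 * now carries MACLEN = 14 and IPLEN = 20 in their bit fields;
         	 * a VLAN tag, if present, is merged in just below.
         	 */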
   7850 
   7851 	if (vlan_has_tag(m0)) {
   7852 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7853 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7854 		*cmdlenp |= NQTX_CMD_VLE;
   7855 	}
   7856 
   7857 	mssidx = 0;
   7858 
   7859 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7860 		int hlen = offset + iphl;
   7861 		int tcp_hlen;
   7862 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7863 
   7864 		if (__predict_false(m0->m_len <
   7865 				    (hlen + sizeof(struct tcphdr)))) {
   7866 			/*
   7867 			 * TCP/IP headers are not in the first mbuf; we need
   7868 			 * to do this the slow and painful way. Let's just
   7869 			 * hope this doesn't happen very often.
   7870 			 */
   7871 			struct tcphdr th;
   7872 
   7873 			WM_Q_EVCNT_INCR(txq, tsopain);
   7874 
   7875 			m_copydata(m0, hlen, sizeof(th), &th);
   7876 			if (v4) {
   7877 				struct ip ip;
   7878 
   7879 				m_copydata(m0, offset, sizeof(ip), &ip);
   7880 				ip.ip_len = 0;
   7881 				m_copyback(m0,
   7882 				    offset + offsetof(struct ip, ip_len),
   7883 				    sizeof(ip.ip_len), &ip.ip_len);
   7884 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7885 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7886 			} else {
   7887 				struct ip6_hdr ip6;
   7888 
   7889 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7890 				ip6.ip6_plen = 0;
   7891 				m_copyback(m0,
   7892 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7893 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7894 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7895 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7896 			}
   7897 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7898 			    sizeof(th.th_sum), &th.th_sum);
   7899 
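			/* th_off counts 32-bit words. */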
   7900 			tcp_hlen = th.th_off << 2;
   7901 		} else {
   7902 			/*
   7903 			 * TCP/IP headers are in the first mbuf; we can do
   7904 			 * this the easy way.
   7905 			 */
   7906 			struct tcphdr *th;
   7907 
   7908 			if (v4) {
   7909 				struct ip *ip =
   7910 				    (void *)(mtod(m0, char *) + offset);
   7911 				th = (void *)(mtod(m0, char *) + hlen);
   7912 
   7913 				ip->ip_len = 0;
   7914 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7915 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7916 			} else {
   7917 				struct ip6_hdr *ip6 =
   7918 				    (void *)(mtod(m0, char *) + offset);
   7919 				th = (void *)(mtod(m0, char *) + hlen);
   7920 
   7921 				ip6->ip6_plen = 0;
   7922 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7923 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7924 			}
   7925 			tcp_hlen = th->th_off << 2;
   7926 		}
   7927 		hlen += tcp_hlen;
   7928 		*cmdlenp |= NQTX_CMD_TSE;
   7929 
   7930 		if (v4) {
   7931 			WM_Q_EVCNT_INCR(txq, tso);
   7932 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7933 		} else {
   7934 			WM_Q_EVCNT_INCR(txq, tso6);
   7935 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7936 		}
   7937 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7938 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7939 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7940 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7941 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7942 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7943 	} else {
   7944 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7945 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7946 	}
   7947 
   7948 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7949 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7950 		cmdc |= NQTXC_CMD_IP4;
   7951 	}
   7952 
   7953 	if (m0->m_pkthdr.csum_flags &
   7954 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7955 		WM_Q_EVCNT_INCR(txq, tusum);
   7956 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7957 			cmdc |= NQTXC_CMD_TCP;
   7958 		else
   7959 			cmdc |= NQTXC_CMD_UDP;
   7960 
   7961 		cmdc |= NQTXC_CMD_IP4;
   7962 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7963 	}
   7964 	if (m0->m_pkthdr.csum_flags &
   7965 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7966 		WM_Q_EVCNT_INCR(txq, tusum6);
   7967 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7968 			cmdc |= NQTXC_CMD_TCP;
   7969 		else
   7970 			cmdc |= NQTXC_CMD_UDP;
   7971 
   7972 		cmdc |= NQTXC_CMD_IP6;
   7973 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7974 	}
   7975 
	/*
	 * We don't have to write a context descriptor for every packet on
	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
	 * I210 and I211. It is enough to write it once per Tx queue on
	 * these controllers.
	 * Writing a context descriptor for every packet adds overhead,
	 * but it does not cause problems.
	 */
   7984 	/* Fill in the context descriptor. */
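	/*
	 * Note: the Tx context fields (nqtxc_*) are reached through the
	 * descriptor union member named "nqrx_ctx", even though this is a
	 * Tx context descriptor.
	 */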
   7985 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7986 	    htole32(vl_len);
   7987 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7988 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7989 	    htole32(cmdc);
   7990 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7991 	    htole32(mssidx);
   7992 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7993 	DPRINTF(WM_DEBUG_TX,
   7994 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7995 		txq->txq_next, 0, vl_len));
   7996 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7997 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7998 	txs->txs_ndesc++;
   7999 	return 0;
   8000 }
   8001 
   8002 /*
   8003  * wm_nq_start:		[ifnet interface function]
   8004  *
   8005  *	Start packet transmission on the interface for NEWQUEUE devices
   8006  */
   8007 static void
   8008 wm_nq_start(struct ifnet *ifp)
   8009 {
   8010 	struct wm_softc *sc = ifp->if_softc;
   8011 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8012 
   8013 #ifdef WM_MPSAFE
   8014 	KASSERT(if_is_mpsafe(ifp));
   8015 #endif
   8016 	/*
   8017 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8018 	 */
   8019 
   8020 	mutex_enter(txq->txq_lock);
   8021 	if (!txq->txq_stopping)
   8022 		wm_nq_start_locked(ifp);
   8023 	mutex_exit(txq->txq_lock);
   8024 }
   8025 
   8026 static void
   8027 wm_nq_start_locked(struct ifnet *ifp)
   8028 {
   8029 	struct wm_softc *sc = ifp->if_softc;
   8030 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8031 
   8032 	wm_nq_send_common_locked(ifp, txq, false);
   8033 }
   8034 
   8035 static int
   8036 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8037 {
   8038 	int qid;
   8039 	struct wm_softc *sc = ifp->if_softc;
   8040 	struct wm_txqueue *txq;
   8041 
   8042 	qid = wm_select_txqueue(ifp, m);
   8043 	txq = &sc->sc_queue[qid].wmq_txq;
   8044 
   8045 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8046 		m_freem(m);
   8047 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8048 		return ENOBUFS;
   8049 	}
   8050 
   8051 	/*
   8052 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   8053 	 */
   8054 	ifp->if_obytes += m->m_pkthdr.len;
   8055 	if (m->m_flags & M_MCAST)
   8056 		ifp->if_omcasts++;
   8057 
	/*
	 * There are two situations in which this mutex_tryenter() can fail
	 * at run time:
	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
	 * In case (2), the last packet enqueued to txq->txq_interq is also
	 * dequeued by wm_deferred_start_locked(), so it does not get stuck
	 * either.
	 */
   8069 	if (mutex_tryenter(txq->txq_lock)) {
   8070 		if (!txq->txq_stopping)
   8071 			wm_nq_transmit_locked(ifp, txq);
   8072 		mutex_exit(txq->txq_lock);
   8073 	}
   8074 
   8075 	return 0;
   8076 }
   8077 
   8078 static void
   8079 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8080 {
   8081 
   8082 	wm_nq_send_common_locked(ifp, txq, true);
   8083 }
   8084 
   8085 static void
   8086 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8087     bool is_transmit)
   8088 {
   8089 	struct wm_softc *sc = ifp->if_softc;
   8090 	struct mbuf *m0;
   8091 	struct wm_txsoft *txs;
   8092 	bus_dmamap_t dmamap;
   8093 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8094 	bool do_csum, sent;
   8095 	bool remap = true;
   8096 
   8097 	KASSERT(mutex_owned(txq->txq_lock));
   8098 
   8099 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8100 		return;
   8101 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8102 		return;
   8103 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8104 		return;
   8105 
   8106 	sent = false;
   8107 
   8108 	/*
   8109 	 * Loop through the send queue, setting up transmit descriptors
   8110 	 * until we drain the queue, or use up all available transmit
   8111 	 * descriptors.
   8112 	 */
   8113 	for (;;) {
   8114 		m0 = NULL;
   8115 
   8116 		/* Get a work queue entry. */
   8117 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8118 			wm_txeof(txq, UINT_MAX);
   8119 			if (txq->txq_sfree == 0) {
   8120 				DPRINTF(WM_DEBUG_TX,
   8121 				    ("%s: TX: no free job descriptors\n",
   8122 					device_xname(sc->sc_dev)));
   8123 				WM_Q_EVCNT_INCR(txq, txsstall);
   8124 				break;
   8125 			}
   8126 		}
   8127 
   8128 		/* Grab a packet off the queue. */
   8129 		if (is_transmit)
   8130 			m0 = pcq_get(txq->txq_interq);
   8131 		else
   8132 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8133 		if (m0 == NULL)
   8134 			break;
   8135 
   8136 		DPRINTF(WM_DEBUG_TX,
   8137 		    ("%s: TX: have packet to transmit: %p\n",
   8138 		    device_xname(sc->sc_dev), m0));
   8139 
   8140 		txs = &txq->txq_soft[txq->txq_snext];
   8141 		dmamap = txs->txs_dmamap;
   8142 
   8143 		/*
   8144 		 * Load the DMA map.  If this fails, the packet either
   8145 		 * didn't fit in the allotted number of segments, or we
   8146 		 * were short on resources.  For the too-many-segments
   8147 		 * case, we simply report an error and drop the packet,
   8148 		 * since we can't sanely copy a jumbo packet to a single
   8149 		 * buffer.
   8150 		 */
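		/*
		 * On EFBIG, m_defrag() is tried once and the load retried
		 * before the packet is dropped.
		 */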
   8151 retry:
   8152 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8153 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8154 		if (__predict_false(error)) {
   8155 			if (error == EFBIG) {
   8156 				if (remap == true) {
   8157 					struct mbuf *m;
   8158 
   8159 					remap = false;
   8160 					m = m_defrag(m0, M_NOWAIT);
   8161 					if (m != NULL) {
   8162 						WM_Q_EVCNT_INCR(txq, defrag);
   8163 						m0 = m;
   8164 						goto retry;
   8165 					}
   8166 				}
   8167 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8168 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8169 				    "DMA segments, dropping...\n",
   8170 				    device_xname(sc->sc_dev));
   8171 				wm_dump_mbuf_chain(sc, m0);
   8172 				m_freem(m0);
   8173 				continue;
   8174 			}
   8175 			/* Short on resources, just stop for now. */
   8176 			DPRINTF(WM_DEBUG_TX,
   8177 			    ("%s: TX: dmamap load failed: %d\n",
   8178 				device_xname(sc->sc_dev), error));
   8179 			break;
   8180 		}
   8181 
   8182 		segs_needed = dmamap->dm_nsegs;
   8183 
   8184 		/*
   8185 		 * Ensure we have enough descriptors free to describe
   8186 		 * the packet. Note, we always reserve one descriptor
   8187 		 * at the end of the ring due to the semantics of the
   8188 		 * TDT register, plus one more in the event we need
   8189 		 * to load offload context.
   8190 		 */
   8191 		if (segs_needed > txq->txq_free - 2) {
   8192 			/*
   8193 			 * Not enough free descriptors to transmit this
   8194 			 * packet.  We haven't committed anything yet,
   8195 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
   8197 			 * layer that there are no more slots left.
   8198 			 */
   8199 			DPRINTF(WM_DEBUG_TX,
   8200 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8201 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8202 				segs_needed, txq->txq_free - 1));
   8203 			if (!is_transmit)
   8204 				ifp->if_flags |= IFF_OACTIVE;
   8205 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8206 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8207 			WM_Q_EVCNT_INCR(txq, txdstall);
   8208 			break;
   8209 		}
   8210 
   8211 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8212 
   8213 		DPRINTF(WM_DEBUG_TX,
   8214 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8215 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8216 
   8217 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8218 
   8219 		/*
   8220 		 * Store a pointer to the packet so that we can free it
   8221 		 * later.
   8222 		 *
   8223 		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may be
   8225 		 * incremented by 1 if we do checksum offload (a descriptor
   8226 		 * is used to set the checksum context).
   8227 		 */
   8228 		txs->txs_mbuf = m0;
   8229 		txs->txs_firstdesc = txq->txq_next;
   8230 		txs->txs_ndesc = segs_needed;
   8231 
   8232 		/* Set up offload parameters for this packet. */
   8233 		uint32_t cmdlen, fields, dcmdlen;
   8234 		if (m0->m_pkthdr.csum_flags &
   8235 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8236 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8237 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8238 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8239 			    &do_csum) != 0) {
   8240 				/* Error message already displayed. */
   8241 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8242 				continue;
   8243 			}
   8244 		} else {
   8245 			do_csum = false;
   8246 			cmdlen = 0;
   8247 			fields = 0;
   8248 		}
   8249 
   8250 		/* Sync the DMA map. */
   8251 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8252 		    BUS_DMASYNC_PREWRITE);
   8253 
   8254 		/* Initialize the first transmit descriptor. */
   8255 		nexttx = txq->txq_next;
   8256 		if (!do_csum) {
   8257 			/* setup a legacy descriptor */
   8258 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8259 			    dmamap->dm_segs[0].ds_addr);
   8260 			txq->txq_descs[nexttx].wtx_cmdlen =
   8261 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8262 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8263 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8264 			if (vlan_has_tag(m0)) {
   8265 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8266 				    htole32(WTX_CMD_VLE);
   8267 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8268 				    htole16(vlan_get_tag(m0));
   8269 			} else
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8271 
   8272 			dcmdlen = 0;
   8273 		} else {
   8274 			/* setup an advanced data descriptor */
   8275 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8276 			    htole64(dmamap->dm_segs[0].ds_addr);
   8277 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8278 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8280 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8281 			    htole32(fields);
   8282 			DPRINTF(WM_DEBUG_TX,
   8283 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8284 				device_xname(sc->sc_dev), nexttx,
   8285 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8286 			DPRINTF(WM_DEBUG_TX,
   8287 			    ("\t 0x%08x%08x\n", fields,
   8288 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8289 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8290 		}
   8291 
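		/*
		 * dcmdlen, chosen above, is OR'd into each remaining
		 * descriptor: zero for the legacy format, NQTX_DTYP_D |
		 * NQTX_CMD_DEXT for the advanced format.
		 */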
   8292 		lasttx = nexttx;
   8293 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the next descriptors. The legacy and advanced
		 * formats are the same from here on.
		 */
   8298 		for (seg = 1; seg < dmamap->dm_nsegs;
   8299 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8300 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8301 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8302 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8303 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8304 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8305 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8306 			lasttx = nexttx;
   8307 
   8308 			DPRINTF(WM_DEBUG_TX,
   8309 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8310 				device_xname(sc->sc_dev), nexttx,
   8311 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8312 				dmamap->dm_segs[seg].ds_len));
   8313 		}
   8314 
   8315 		KASSERT(lasttx != -1);
   8316 
   8317 		/*
   8318 		 * Set up the command byte on the last descriptor of
   8319 		 * the packet. If we're in the interrupt delay window,
   8320 		 * delay the interrupt.
   8321 		 */
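		/*
		 * EOP and RS occupy the same bit positions in the legacy
		 * and NQ formats (asserted below), so the legacy view of
		 * the descriptor works for both.
		 */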
   8322 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8323 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8324 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8325 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8326 
   8327 		txs->txs_lastdesc = lasttx;
   8328 
   8329 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8330 		    device_xname(sc->sc_dev),
   8331 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8332 
   8333 		/* Sync the descriptors we're using. */
   8334 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8335 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8336 
   8337 		/* Give the packet to the chip. */
   8338 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8339 		sent = true;
   8340 
   8341 		DPRINTF(WM_DEBUG_TX,
   8342 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8343 
   8344 		DPRINTF(WM_DEBUG_TX,
   8345 		    ("%s: TX: finished transmitting packet, job %d\n",
   8346 			device_xname(sc->sc_dev), txq->txq_snext));
   8347 
   8348 		/* Advance the tx pointer. */
   8349 		txq->txq_free -= txs->txs_ndesc;
   8350 		txq->txq_next = nexttx;
   8351 
   8352 		txq->txq_sfree--;
   8353 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8354 
   8355 		/* Pass the packet to any BPF listeners. */
   8356 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8357 	}
   8358 
   8359 	if (m0 != NULL) {
   8360 		if (!is_transmit)
   8361 			ifp->if_flags |= IFF_OACTIVE;
   8362 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8363 		WM_Q_EVCNT_INCR(txq, descdrop);
   8364 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8365 			__func__));
   8366 		m_freem(m0);
   8367 	}
   8368 
   8369 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8370 		/* No more slots; notify upper layer. */
   8371 		if (!is_transmit)
   8372 			ifp->if_flags |= IFF_OACTIVE;
   8373 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8374 	}
   8375 
   8376 	if (sent) {
   8377 		/* Set a watchdog timer in case the chip flakes out. */
   8378 		txq->txq_lastsent = time_uptime;
   8379 		txq->txq_sending = true;
   8380 	}
   8381 }
   8382 
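/*
 * wm_deferred_start_locked:
 *
 *	Restart transmission deferred from interrupt context.
 *	Called with the Tx queue lock held.
 */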
   8383 static void
   8384 wm_deferred_start_locked(struct wm_txqueue *txq)
   8385 {
   8386 	struct wm_softc *sc = txq->txq_sc;
   8387 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8388 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8389 	int qid = wmq->wmq_id;
   8390 
   8391 	KASSERT(mutex_owned(txq->txq_lock));
   8392 
	/*
	 * The caller holds txq_lock and will release it, so just return
	 * here; don't unlock it.
	 */
	if (txq->txq_stopping)
		return;
   8397 
   8398 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX Needed for ALTQ or single-CPU systems */
   8400 		if (qid == 0)
   8401 			wm_nq_start_locked(ifp);
   8402 		wm_nq_transmit_locked(ifp, txq);
   8403 	} else {
		/* XXX Needed for ALTQ or single-CPU systems */
   8405 		if (qid == 0)
   8406 			wm_start_locked(ifp);
   8407 		wm_transmit_locked(ifp, txq);
   8408 	}
   8409 }
   8410 
   8411 /* Interrupt */
   8412 
   8413 /*
   8414  * wm_txeof:
   8415  *
   8416  *	Helper; handle transmit interrupts.
   8417  */
   8418 static bool
   8419 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8420 {
   8421 	struct wm_softc *sc = txq->txq_sc;
   8422 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8423 	struct wm_txsoft *txs;
   8424 	int count = 0;
   8425 	int i;
   8426 	uint8_t status;
   8427 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8428 	bool more = false;
   8429 
   8430 	KASSERT(mutex_owned(txq->txq_lock));
   8431 
   8432 	if (txq->txq_stopping)
   8433 		return false;
   8434 
   8435 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8437 	if (wmq->wmq_id == 0)
   8438 		ifp->if_flags &= ~IFF_OACTIVE;
   8439 
   8440 	/*
   8441 	 * Go through the Tx list and free mbufs for those
   8442 	 * frames which have been transmitted.
   8443 	 */
   8444 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8445 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8446 		if (limit-- == 0) {
   8447 			more = true;
   8448 			DPRINTF(WM_DEBUG_TX,
   8449 			    ("%s: TX: loop limited, job %d is not processed\n",
   8450 				device_xname(sc->sc_dev), i));
   8451 			break;
   8452 		}
   8453 
   8454 		txs = &txq->txq_soft[i];
   8455 
   8456 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8457 			device_xname(sc->sc_dev), i));
   8458 
   8459 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8460 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8461 
   8462 		status =
   8463 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8464 		if ((status & WTX_ST_DD) == 0) {
   8465 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8466 			    BUS_DMASYNC_PREREAD);
   8467 			break;
   8468 		}
   8469 
   8470 		count++;
   8471 		DPRINTF(WM_DEBUG_TX,
   8472 		    ("%s: TX: job %d done: descs %d..%d\n",
   8473 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8474 		    txs->txs_lastdesc));
   8475 
   8476 		/*
   8477 		 * XXX We should probably be using the statistics
   8478 		 * XXX registers, but I don't know if they exist
   8479 		 * XXX on chips before the i82544.
   8480 		 */
   8481 
   8482 #ifdef WM_EVENT_COUNTERS
   8483 		if (status & WTX_ST_TU)
   8484 			WM_Q_EVCNT_INCR(txq, underrun);
   8485 #endif /* WM_EVENT_COUNTERS */
   8486 
		/*
		 * The documents for 82574 and newer say that the status
		 * field has neither an EC (Excessive Collision) bit nor an
		 * LC (Late Collision) bit (both are reserved). See the
		 * "PCIe GbE Controller Open Source Software Developer's
		 * Manual" and the 82574 (and newer) datasheets.
		 *
		 * XXX The LC bit has been seen set on I218 even though the
		 * media was full duplex, so the bit might have some other
		 * meaning (no documentation for it is known).
		 */
   8497 
   8498 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8499 		    && ((sc->sc_type < WM_T_82574)
   8500 			|| (sc->sc_type == WM_T_80003))) {
   8501 			ifp->if_oerrors++;
   8502 			if (status & WTX_ST_LC)
   8503 				log(LOG_WARNING, "%s: late collision\n",
   8504 				    device_xname(sc->sc_dev));
   8505 			else if (status & WTX_ST_EC) {
   8506 				ifp->if_collisions +=
   8507 				    TX_COLLISION_THRESHOLD + 1;
   8508 				log(LOG_WARNING, "%s: excessive collisions\n",
   8509 				    device_xname(sc->sc_dev));
   8510 			}
   8511 		} else
   8512 			ifp->if_opackets++;
   8513 
   8514 		txq->txq_packets++;
   8515 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8516 
   8517 		txq->txq_free += txs->txs_ndesc;
   8518 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8519 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8520 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8521 		m_freem(txs->txs_mbuf);
   8522 		txs->txs_mbuf = NULL;
   8523 	}
   8524 
   8525 	/* Update the dirty transmit buffer pointer. */
   8526 	txq->txq_sdirty = i;
   8527 	DPRINTF(WM_DEBUG_TX,
   8528 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8529 
   8530 	if (count != 0)
   8531 		rnd_add_uint32(&sc->rnd_source, count);
   8532 
   8533 	/*
   8534 	 * If there are no more pending transmissions, cancel the watchdog
   8535 	 * timer.
   8536 	 */
   8537 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8538 		txq->txq_sending = false;
   8539 
   8540 	return more;
   8541 }
   8542 
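/*
 * The following inline helpers hide the three Rx descriptor formats:
 * the 82574 uses extended descriptors, NEWQUEUE devices (82575 and
 * newer) use advanced descriptors and everything else uses legacy
 * descriptors.
 */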
   8543 static inline uint32_t
   8544 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8545 {
   8546 	struct wm_softc *sc = rxq->rxq_sc;
   8547 
   8548 	if (sc->sc_type == WM_T_82574)
   8549 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8550 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8551 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8552 	else
   8553 		return rxq->rxq_descs[idx].wrx_status;
   8554 }
   8555 
   8556 static inline uint32_t
   8557 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8558 {
   8559 	struct wm_softc *sc = rxq->rxq_sc;
   8560 
   8561 	if (sc->sc_type == WM_T_82574)
   8562 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8563 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8564 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8565 	else
   8566 		return rxq->rxq_descs[idx].wrx_errors;
   8567 }
   8568 
   8569 static inline uint16_t
   8570 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8571 {
   8572 	struct wm_softc *sc = rxq->rxq_sc;
   8573 
   8574 	if (sc->sc_type == WM_T_82574)
   8575 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8576 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8577 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8578 	else
   8579 		return rxq->rxq_descs[idx].wrx_special;
   8580 }
   8581 
   8582 static inline int
   8583 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8584 {
   8585 	struct wm_softc *sc = rxq->rxq_sc;
   8586 
   8587 	if (sc->sc_type == WM_T_82574)
   8588 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8589 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8590 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8591 	else
   8592 		return rxq->rxq_descs[idx].wrx_len;
   8593 }
   8594 
   8595 #ifdef WM_DEBUG
   8596 static inline uint32_t
   8597 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8598 {
   8599 	struct wm_softc *sc = rxq->rxq_sc;
   8600 
   8601 	if (sc->sc_type == WM_T_82574)
   8602 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8603 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8604 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8605 	else
   8606 		return 0;
   8607 }
   8608 
   8609 static inline uint8_t
   8610 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8611 {
   8612 	struct wm_softc *sc = rxq->rxq_sc;
   8613 
   8614 	if (sc->sc_type == WM_T_82574)
   8615 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8616 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8617 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8618 	else
   8619 		return 0;
   8620 }
   8621 #endif /* WM_DEBUG */
   8622 
   8623 static inline bool
   8624 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8625     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8626 {
   8627 
   8628 	if (sc->sc_type == WM_T_82574)
   8629 		return (status & ext_bit) != 0;
   8630 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8631 		return (status & nq_bit) != 0;
   8632 	else
   8633 		return (status & legacy_bit) != 0;
   8634 }
   8635 
   8636 static inline bool
   8637 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8638     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8639 {
   8640 
   8641 	if (sc->sc_type == WM_T_82574)
   8642 		return (error & ext_bit) != 0;
   8643 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8644 		return (error & nq_bit) != 0;
   8645 	else
   8646 		return (error & legacy_bit) != 0;
   8647 }
   8648 
   8649 static inline bool
   8650 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8651 {
   8652 
   8653 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8654 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8655 		return true;
   8656 	else
   8657 		return false;
   8658 }
   8659 
   8660 static inline bool
   8661 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8662 {
   8663 	struct wm_softc *sc = rxq->rxq_sc;
   8664 
   8665 	/* XXXX missing error bit for newqueue? */
   8666 	if (wm_rxdesc_is_set_error(sc, errors,
   8667 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8668 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8669 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8670 		NQRXC_ERROR_RXE)) {
   8671 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8672 		    EXTRXC_ERROR_SE, 0))
   8673 			log(LOG_WARNING, "%s: symbol error\n",
   8674 			    device_xname(sc->sc_dev));
   8675 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8676 		    EXTRXC_ERROR_SEQ, 0))
   8677 			log(LOG_WARNING, "%s: receive sequence error\n",
   8678 			    device_xname(sc->sc_dev));
   8679 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8680 		    EXTRXC_ERROR_CE, 0))
   8681 			log(LOG_WARNING, "%s: CRC error\n",
   8682 			    device_xname(sc->sc_dev));
   8683 		return true;
   8684 	}
   8685 
   8686 	return false;
   8687 }
   8688 
   8689 static inline bool
   8690 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8691 {
   8692 	struct wm_softc *sc = rxq->rxq_sc;
   8693 
   8694 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8695 		NQRXC_STATUS_DD)) {
   8696 		/* We have processed all of the receive descriptors. */
   8697 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8698 		return false;
   8699 	}
   8700 
   8701 	return true;
   8702 }
   8703 
   8704 static inline bool
   8705 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8706     uint16_t vlantag, struct mbuf *m)
   8707 {
   8708 
   8709 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8710 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8711 		vlan_set_tag(m, le16toh(vlantag));
   8712 	}
   8713 
   8714 	return true;
   8715 }
   8716 
   8717 static inline void
   8718 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8719     uint32_t errors, struct mbuf *m)
   8720 {
   8721 	struct wm_softc *sc = rxq->rxq_sc;
   8722 
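	/*
	 * WRX_ST_IXSM ("ignore checksum indication") exists only in the
	 * legacy format; the extended and NQ bits passed here are zero, so
	 * the checksum status is always examined on those chips.
	 */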
   8723 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8724 		if (wm_rxdesc_is_set_status(sc, status,
   8725 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8726 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8727 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8728 			if (wm_rxdesc_is_set_error(sc, errors,
   8729 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8730 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8731 		}
   8732 		if (wm_rxdesc_is_set_status(sc, status,
   8733 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8734 			/*
   8735 			 * Note: we don't know if this was TCP or UDP,
   8736 			 * so we just set both bits, and expect the
   8737 			 * upper layers to deal.
   8738 			 */
   8739 			WM_Q_EVCNT_INCR(rxq, tusum);
   8740 			m->m_pkthdr.csum_flags |=
   8741 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8742 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8743 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8744 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8745 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8746 		}
   8747 	}
   8748 }
   8749 
   8750 /*
   8751  * wm_rxeof:
   8752  *
   8753  *	Helper; handle receive interrupts.
   8754  */
   8755 static bool
   8756 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8757 {
   8758 	struct wm_softc *sc = rxq->rxq_sc;
   8759 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8760 	struct wm_rxsoft *rxs;
   8761 	struct mbuf *m;
   8762 	int i, len;
   8763 	int count = 0;
   8764 	uint32_t status, errors;
   8765 	uint16_t vlantag;
   8766 	bool more = false;
   8767 
   8768 	KASSERT(mutex_owned(rxq->rxq_lock));
   8769 
   8770 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8771 		if (limit-- == 0) {
   8772 			rxq->rxq_ptr = i;
   8773 			more = true;
   8774 			DPRINTF(WM_DEBUG_RX,
   8775 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8776 				device_xname(sc->sc_dev), i));
   8777 			break;
   8778 		}
   8779 
   8780 		rxs = &rxq->rxq_soft[i];
   8781 
   8782 		DPRINTF(WM_DEBUG_RX,
   8783 		    ("%s: RX: checking descriptor %d\n",
   8784 			device_xname(sc->sc_dev), i));
   8785 		wm_cdrxsync(rxq, i,
   8786 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8787 
   8788 		status = wm_rxdesc_get_status(rxq, i);
   8789 		errors = wm_rxdesc_get_errors(rxq, i);
   8790 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8791 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8792 #ifdef WM_DEBUG
   8793 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8794 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8795 #endif
   8796 
   8797 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while holding
			 * rxq_lock, keeping it consistent with the
			 * counters.
			 */
   8802 			rxq->rxq_ptr = i;
   8803 			break;
   8804 		}
   8805 
   8806 		count++;
   8807 		if (__predict_false(rxq->rxq_discard)) {
   8808 			DPRINTF(WM_DEBUG_RX,
   8809 			    ("%s: RX: discarding contents of descriptor %d\n",
   8810 				device_xname(sc->sc_dev), i));
   8811 			wm_init_rxdesc(rxq, i);
   8812 			if (wm_rxdesc_is_eop(rxq, status)) {
   8813 				/* Reset our state. */
   8814 				DPRINTF(WM_DEBUG_RX,
   8815 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8816 					device_xname(sc->sc_dev)));
   8817 				rxq->rxq_discard = 0;
   8818 			}
   8819 			continue;
   8820 		}
   8821 
   8822 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8823 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8824 
   8825 		m = rxs->rxs_mbuf;
   8826 
   8827 		/*
   8828 		 * Add a new receive buffer to the ring, unless of
   8829 		 * course the length is zero. Treat the latter as a
   8830 		 * failed mapping.
   8831 		 */
   8832 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8833 			/*
   8834 			 * Failed, throw away what we've done so
   8835 			 * far, and discard the rest of the packet.
   8836 			 */
   8837 			ifp->if_ierrors++;
   8838 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8839 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8840 			wm_init_rxdesc(rxq, i);
   8841 			if (!wm_rxdesc_is_eop(rxq, status))
   8842 				rxq->rxq_discard = 1;
   8843 			if (rxq->rxq_head != NULL)
   8844 				m_freem(rxq->rxq_head);
   8845 			WM_RXCHAIN_RESET(rxq);
   8846 			DPRINTF(WM_DEBUG_RX,
   8847 			    ("%s: RX: Rx buffer allocation failed, "
   8848 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8849 				rxq->rxq_discard ? " (discard)" : ""));
   8850 			continue;
   8851 		}
   8852 
   8853 		m->m_len = len;
   8854 		rxq->rxq_len += len;
   8855 		DPRINTF(WM_DEBUG_RX,
   8856 		    ("%s: RX: buffer at %p len %d\n",
   8857 			device_xname(sc->sc_dev), m->m_data, len));
   8858 
   8859 		/* If this is not the end of the packet, keep looking. */
   8860 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8861 			WM_RXCHAIN_LINK(rxq, m);
   8862 			DPRINTF(WM_DEBUG_RX,
   8863 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8864 				device_xname(sc->sc_dev), rxq->rxq_len));
   8865 			continue;
   8866 		}
   8867 
		/*
		 * Okay, we have the entire packet now. The chip is
		 * configured to include the FCS except on I350, I354 and
		 * I21[01] (not all chips can be configured to strip it),
		 * so we need to trim it. We may also need to adjust the
		 * length of the previous mbuf in the chain if the current
		 * mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on I350, so we don't trim the FCS
		 * there.
		 */
   8878 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8879 		    && (sc->sc_type != WM_T_I210)
   8880 		    && (sc->sc_type != WM_T_I211)) {
   8881 			if (m->m_len < ETHER_CRC_LEN) {
   8882 				rxq->rxq_tail->m_len
   8883 				    -= (ETHER_CRC_LEN - m->m_len);
   8884 				m->m_len = 0;
   8885 			} else
   8886 				m->m_len -= ETHER_CRC_LEN;
   8887 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8888 		} else
   8889 			len = rxq->rxq_len;
   8890 
   8891 		WM_RXCHAIN_LINK(rxq, m);
   8892 
   8893 		*rxq->rxq_tailp = NULL;
   8894 		m = rxq->rxq_head;
   8895 
   8896 		WM_RXCHAIN_RESET(rxq);
   8897 
   8898 		DPRINTF(WM_DEBUG_RX,
   8899 		    ("%s: RX: have entire packet, len -> %d\n",
   8900 			device_xname(sc->sc_dev), len));
   8901 
   8902 		/* If an error occurred, update stats and drop the packet. */
   8903 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8904 			m_freem(m);
   8905 			continue;
   8906 		}
   8907 
   8908 		/* No errors.  Receive the packet. */
   8909 		m_set_rcvif(m, ifp);
   8910 		m->m_pkthdr.len = len;
		/*
		 * TODO
		 * We should save the rsshash and rsstype in this mbuf.
		 */
   8915 		DPRINTF(WM_DEBUG_RX,
   8916 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8917 			device_xname(sc->sc_dev), rsstype, rsshash));
   8918 
   8919 		/*
   8920 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8921 		 * for us.  Associate the tag with the packet.
   8922 		 */
   8923 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8924 			continue;
   8925 
   8926 		/* Set up checksum info for this packet. */
   8927 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
		/*
		 * Update the receive pointer while holding rxq_lock,
		 * keeping it consistent with the counters.
		 */
   8932 		rxq->rxq_ptr = i;
   8933 		rxq->rxq_packets++;
   8934 		rxq->rxq_bytes += len;
   8935 		mutex_exit(rxq->rxq_lock);
   8936 
   8937 		/* Pass it on. */
   8938 		if_percpuq_enqueue(sc->sc_ipq, m);
   8939 
   8940 		mutex_enter(rxq->rxq_lock);
   8941 
   8942 		if (rxq->rxq_stopping)
   8943 			break;
   8944 	}
   8945 
   8946 	if (count != 0)
   8947 		rnd_add_uint32(&sc->rnd_source, count);
   8948 
   8949 	DPRINTF(WM_DEBUG_RX,
   8950 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8951 
   8952 	return more;
   8953 }
   8954 
   8955 /*
   8956  * wm_linkintr_gmii:
   8957  *
   8958  *	Helper; handle link interrupts for GMII.
   8959  */
   8960 static void
   8961 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8962 {
   8963 	device_t dev = sc->sc_dev;
   8964 	uint32_t status, reg;
   8965 	bool link;
   8966 	int rv;
   8967 
   8968 	KASSERT(WM_CORE_LOCKED(sc));
   8969 
   8970 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8971 		__func__));
   8972 
   8973 	if ((icr & ICR_LSC) == 0) {
   8974 		if (icr & ICR_RXSEQ)
   8975 			DPRINTF(WM_DEBUG_LINK,
   8976 			    ("%s: LINK Receive sequence error\n",
   8977 				device_xname(dev)));
   8978 		return;
   8979 	}
   8980 
   8981 	/* Link status changed */
   8982 	status = CSR_READ(sc, WMREG_STATUS);
   8983 	link = status & STATUS_LU;
   8984 	if (link)
   8985 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8986 			device_xname(dev),
   8987 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8988 	else
   8989 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8990 			device_xname(dev)));
   8991 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8992 		wm_gig_downshift_workaround_ich8lan(sc);
   8993 
   8994 	if ((sc->sc_type == WM_T_ICH8)
   8995 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8996 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8997 	}
   8998 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8999 		device_xname(dev)));
   9000 	mii_pollstat(&sc->sc_mii);
   9001 	if (sc->sc_type == WM_T_82543) {
   9002 		int miistatus, active;
   9003 
		/*
		 * With the 82543, we need to force the MAC's speed and
		 * duplex settings to match the PHY's configuration.
		 */
   9009 		miistatus = sc->sc_mii.mii_media_status;
   9010 
   9011 		if (miistatus & IFM_ACTIVE) {
   9012 			active = sc->sc_mii.mii_media_active;
   9013 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9014 			switch (IFM_SUBTYPE(active)) {
   9015 			case IFM_10_T:
   9016 				sc->sc_ctrl |= CTRL_SPEED_10;
   9017 				break;
   9018 			case IFM_100_TX:
   9019 				sc->sc_ctrl |= CTRL_SPEED_100;
   9020 				break;
   9021 			case IFM_1000_T:
   9022 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9023 				break;
   9024 			default:
   9025 				/*
   9026 				 * fiber?
   9027 				 * Shoud not enter here.
				 * Should not enter here.
   9029 				printf("unknown media (%x)\n", active);
   9030 				break;
   9031 			}
   9032 			if (active & IFM_FDX)
   9033 				sc->sc_ctrl |= CTRL_FD;
   9034 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9035 		}
   9036 	} else if (sc->sc_type == WM_T_PCH) {
   9037 		wm_k1_gig_workaround_hv(sc,
   9038 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9039 	}
   9040 
   9041 	/*
   9042 	 * When connected at 10Mbps half-duplex, some parts are excessively
	 * aggressive, resulting in many collisions. To avoid this, increase
   9044 	 * the IPG and reduce Rx latency in the PHY.
   9045 	 */
   9046 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9047 	    && link) {
   9048 		uint32_t tipg_reg;
   9049 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9050 		bool fdx;
   9051 		uint16_t emi_addr, emi_val;
   9052 
   9053 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9054 		tipg_reg &= ~TIPG_IPGT_MASK;
   9055 		fdx = status & STATUS_FD;
   9056 
   9057 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9058 			tipg_reg |= 0xff;
   9059 			/* Reduce Rx latency in analog PHY */
   9060 			emi_val = 0;
   9061 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9062 		    fdx && speed != STATUS_SPEED_1000) {
   9063 			tipg_reg |= 0xc;
   9064 			emi_val = 1;
   9065 		} else {
   9066 			/* Roll back the default values */
   9067 			tipg_reg |= 0x08;
   9068 			emi_val = 1;
   9069 		}
   9070 
   9071 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9072 
   9073 		rv = sc->phy.acquire(sc);
   9074 		if (rv)
   9075 			return;
   9076 
   9077 		if (sc->sc_type == WM_T_PCH2)
   9078 			emi_addr = I82579_RX_CONFIG;
   9079 		else
   9080 			emi_addr = I217_RX_CONFIG;
   9081 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9082 
   9083 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9084 			uint16_t phy_reg;
   9085 
   9086 			sc->phy.readreg_locked(dev, 2,
   9087 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9088 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9089 			if (speed == STATUS_SPEED_100
   9090 			    || speed == STATUS_SPEED_10)
   9091 				phy_reg |= 0x3e8;
   9092 			else
   9093 				phy_reg |= 0xfa;
   9094 			sc->phy.writereg_locked(dev, 2,
   9095 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9096 
   9097 			if (speed == STATUS_SPEED_1000) {
   9098 				sc->phy.readreg_locked(dev, 2,
   9099 				    HV_PM_CTRL, &phy_reg);
   9100 
   9101 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9102 
   9103 				sc->phy.writereg_locked(dev, 2,
   9104 				    HV_PM_CTRL, phy_reg);
   9105 			}
   9106 		}
   9107 		sc->phy.release(sc);
   9108 
   9109 		if (rv)
   9110 			return;
   9111 
   9112 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9113 			uint16_t data, ptr_gap;
   9114 
   9115 			if (speed == STATUS_SPEED_1000) {
   9116 				rv = sc->phy.acquire(sc);
   9117 				if (rv)
   9118 					return;
   9119 
   9120 				rv = sc->phy.readreg_locked(dev, 2,
   9121 				    I219_UNKNOWN1, &data);
   9122 				if (rv) {
   9123 					sc->phy.release(sc);
   9124 					return;
   9125 				}
   9126 
   9127 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9128 				if (ptr_gap < 0x18) {
   9129 					data &= ~(0x3ff << 2);
   9130 					data |= (0x18 << 2);
   9131 					rv = sc->phy.writereg_locked(dev,
   9132 					    2, I219_UNKNOWN1, data);
   9133 				}
   9134 				sc->phy.release(sc);
   9135 				if (rv)
   9136 					return;
   9137 			} else {
   9138 				rv = sc->phy.acquire(sc);
   9139 				if (rv)
   9140 					return;
   9141 
   9142 				rv = sc->phy.writereg_locked(dev, 2,
   9143 				    I219_UNKNOWN1, 0xc023);
   9144 				sc->phy.release(sc);
   9145 				if (rv)
   9146 					return;
   9147 
   9148 			}
   9149 		}
   9150 	}
   9151 
	/*
	 * I217 packet loss issue:
	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
	 * on power up. Set the Beacon Duration for I217 to 8 usec.
	 */
   9158 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9159 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9160 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9161 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9162 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9163 	}
   9164 
	/* Work around the I218 hang issue */
   9166 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9167 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9168 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9169 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9170 		wm_k1_workaround_lpt_lp(sc, link);
   9171 
   9172 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9173 		/*
   9174 		 * Set platform power management values for Latency
   9175 		 * Tolerance Reporting (LTR)
   9176 		 */
   9177 		wm_platform_pm_pch_lpt(sc,
   9178 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9179 	}
   9180 
   9181 	/* Clear link partner's EEE ability */
   9182 	sc->eee_lp_ability = 0;
   9183 
   9184 	/* FEXTNVM6 K1-off workaround */
   9185 	if (sc->sc_type == WM_T_PCH_SPT) {
   9186 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9187 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9188 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9189 		else
   9190 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9191 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9192 	}
   9193 
   9194 	if (!link)
   9195 		return;
   9196 
   9197 	switch (sc->sc_type) {
   9198 	case WM_T_PCH2:
   9199 		wm_k1_workaround_lv(sc);
   9200 		/* FALLTHROUGH */
   9201 	case WM_T_PCH:
   9202 		if (sc->sc_phytype == WMPHY_82578)
   9203 			wm_link_stall_workaround_hv(sc);
   9204 		break;
   9205 	default:
   9206 		break;
   9207 	}
   9208 
   9209 	/* Enable/Disable EEE after link up */
   9210 	if (sc->sc_phytype > WMPHY_82579)
   9211 		wm_set_eee_pchlan(sc);
   9212 }
   9213 
   9214 /*
   9215  * wm_linkintr_tbi:
   9216  *
   9217  *	Helper; handle link interrupts for TBI mode.
   9218  */
   9219 static void
   9220 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9221 {
   9222 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9223 	uint32_t status;
   9224 
   9225 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9226 		__func__));
   9227 
   9228 	status = CSR_READ(sc, WMREG_STATUS);
   9229 	if (icr & ICR_LSC) {
   9230 		wm_check_for_link(sc);
   9231 		if (status & STATUS_LU) {
   9232 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9233 				device_xname(sc->sc_dev),
   9234 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9235 			/*
   9236 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9237 			 * so we should update sc->sc_ctrl
   9238 			 */
   9239 
   9240 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9241 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9242 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9243 			if (status & STATUS_FD)
   9244 				sc->sc_tctl |=
   9245 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9246 			else
   9247 				sc->sc_tctl |=
   9248 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9249 			if (sc->sc_ctrl & CTRL_TFCE)
   9250 				sc->sc_fcrtl |= FCRTL_XONE;
   9251 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9252 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9253 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9254 			sc->sc_tbi_linkup = 1;
   9255 			if_link_state_change(ifp, LINK_STATE_UP);
   9256 		} else {
   9257 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9258 				device_xname(sc->sc_dev)));
   9259 			sc->sc_tbi_linkup = 0;
   9260 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9261 		}
   9262 		/* Update LED */
   9263 		wm_tbi_serdes_set_linkled(sc);
   9264 	} else if (icr & ICR_RXSEQ)
   9265 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9266 			device_xname(sc->sc_dev)));
   9267 }
   9268 
   9269 /*
   9270  * wm_linkintr_serdes:
   9271  *
 *	Helper; handle link interrupts for SERDES mode.
   9273  */
   9274 static void
   9275 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9276 {
   9277 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9278 	struct mii_data *mii = &sc->sc_mii;
   9279 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9280 	uint32_t pcs_adv, pcs_lpab, reg;
   9281 
   9282 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9283 		__func__));
   9284 
   9285 	if (icr & ICR_LSC) {
   9286 		/* Check PCS */
   9287 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9288 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9289 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9290 				device_xname(sc->sc_dev)));
   9291 			mii->mii_media_status |= IFM_ACTIVE;
   9292 			sc->sc_tbi_linkup = 1;
   9293 			if_link_state_change(ifp, LINK_STATE_UP);
   9294 		} else {
   9295 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9296 				device_xname(sc->sc_dev)));
			mii->mii_media_active |= IFM_NONE;
   9298 			sc->sc_tbi_linkup = 0;
   9299 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9300 			wm_tbi_serdes_set_linkled(sc);
   9301 			return;
   9302 		}
   9303 		mii->mii_media_active |= IFM_1000_SX;
   9304 		if ((reg & PCS_LSTS_FDX) != 0)
   9305 			mii->mii_media_active |= IFM_FDX;
   9306 		else
   9307 			mii->mii_media_active |= IFM_HDX;
   9308 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9309 			/* Check flow */
   9310 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9311 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9312 				DPRINTF(WM_DEBUG_LINK,
   9313 				    ("XXX LINKOK but not ACOMP\n"));
   9314 				return;
   9315 			}
   9316 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9317 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9318 			DPRINTF(WM_DEBUG_LINK,
   9319 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9320 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9321 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9322 				mii->mii_media_active |= IFM_FLOW
   9323 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9324 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9325 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9326 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9327 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9328 				mii->mii_media_active |= IFM_FLOW
   9329 				    | IFM_ETH_TXPAUSE;
   9330 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9331 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9332 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9333 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9334 				mii->mii_media_active |= IFM_FLOW
   9335 				    | IFM_ETH_RXPAUSE;
   9336 		}
   9337 		/* Update LED */
   9338 		wm_tbi_serdes_set_linkled(sc);
   9339 	} else
   9340 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9341 		    device_xname(sc->sc_dev)));
   9342 }
   9343 
   9344 /*
   9345  * wm_linkintr:
   9346  *
   9347  *	Helper; handle link interrupts.
   9348  */
   9349 static void
   9350 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9351 {
   9352 
   9353 	KASSERT(WM_CORE_LOCKED(sc));
   9354 
   9355 	if (sc->sc_flags & WM_F_HAS_MII)
   9356 		wm_linkintr_gmii(sc, icr);
   9357 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9358 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9359 		wm_linkintr_serdes(sc, icr);
   9360 	else
   9361 		wm_linkintr_tbi(sc, icr);
   9362 }
   9363 
   9364 /*
   9365  * wm_intr_legacy:
   9366  *
   9367  *	Interrupt service routine for INTx and MSI.
   9368  */
   9369 static int
   9370 wm_intr_legacy(void *arg)
   9371 {
   9372 	struct wm_softc *sc = arg;
   9373 	struct wm_queue *wmq = &sc->sc_queue[0];
   9374 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9375 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9376 	uint32_t icr, rndval = 0;
   9377 	int handled = 0;
   9378 
   9379 	while (1 /* CONSTCOND */) {
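		/* Reading ICR acknowledges (clears) the interrupt causes. */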
   9380 		icr = CSR_READ(sc, WMREG_ICR);
   9381 		if ((icr & sc->sc_icr) == 0)
   9382 			break;
   9383 		if (handled == 0)
   9384 			DPRINTF(WM_DEBUG_TX,
   9385 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9386 		if (rndval == 0)
   9387 			rndval = icr;
   9388 
   9389 		mutex_enter(rxq->rxq_lock);
   9390 
   9391 		if (rxq->rxq_stopping) {
   9392 			mutex_exit(rxq->rxq_lock);
   9393 			break;
   9394 		}
   9395 
   9396 		handled = 1;
   9397 
   9398 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9399 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9400 			DPRINTF(WM_DEBUG_RX,
   9401 			    ("%s: RX: got Rx intr 0x%08x\n",
   9402 				device_xname(sc->sc_dev),
   9403 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9404 			WM_Q_EVCNT_INCR(rxq, intr);
   9405 		}
   9406 #endif
   9407 		/*
   9408 		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
		 * So we can call wm_rxeof() in interrupt context.
   9411 		 */
   9412 		wm_rxeof(rxq, UINT_MAX);
   9413 
   9414 		mutex_exit(rxq->rxq_lock);
   9415 		mutex_enter(txq->txq_lock);
   9416 
   9417 		if (txq->txq_stopping) {
   9418 			mutex_exit(txq->txq_lock);
   9419 			break;
   9420 		}
   9421 
   9422 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9423 		if (icr & ICR_TXDW) {
   9424 			DPRINTF(WM_DEBUG_TX,
   9425 			    ("%s: TX: got TXDW interrupt\n",
   9426 				device_xname(sc->sc_dev)));
   9427 			WM_Q_EVCNT_INCR(txq, txdw);
   9428 		}
   9429 #endif
   9430 		wm_txeof(txq, UINT_MAX);
   9431 
   9432 		mutex_exit(txq->txq_lock);
   9433 		WM_CORE_LOCK(sc);
   9434 
   9435 		if (sc->sc_core_stopping) {
   9436 			WM_CORE_UNLOCK(sc);
   9437 			break;
   9438 		}
   9439 
   9440 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9441 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9442 			wm_linkintr(sc, icr);
   9443 		}
   9444 
   9445 		WM_CORE_UNLOCK(sc);
   9446 
   9447 		if (icr & ICR_RXO) {
   9448 #if defined(WM_DEBUG)
   9449 			log(LOG_WARNING, "%s: Receive overrun\n",
   9450 			    device_xname(sc->sc_dev));
   9451 #endif /* defined(WM_DEBUG) */
   9452 		}
   9453 	}
   9454 
   9455 	rnd_add_uint32(&sc->rnd_source, rndval);
   9456 
   9457 	if (handled) {
   9458 		/* Try to get more packets going. */
   9459 		softint_schedule(wmq->wmq_si);
   9460 	}
   9461 
   9462 	return handled;
   9463 }
   9464 
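/*
 * Mask/unmask a queue's Tx/Rx interrupts: the 82574 uses IMC/IMS, the
 * 82575 uses EIMC/EIMS with EITR_{TX,RX}_QUEUE bits and later MSI-X
 * devices use EIMC/EIMS indexed by the MSI-X vector.
 */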
   9465 static inline void
   9466 wm_txrxintr_disable(struct wm_queue *wmq)
   9467 {
   9468 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9469 
   9470 	if (sc->sc_type == WM_T_82574)
   9471 		CSR_WRITE(sc, WMREG_IMC,
   9472 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9473 	else if (sc->sc_type == WM_T_82575)
   9474 		CSR_WRITE(sc, WMREG_EIMC,
   9475 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9476 	else
   9477 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9478 }
   9479 
   9480 static inline void
   9481 wm_txrxintr_enable(struct wm_queue *wmq)
   9482 {
   9483 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9484 
   9485 	wm_itrs_calculate(sc, wmq);
   9486 
	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
	 * here. There is no need to care whether RXQ(0) or RXQ(1) enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
	 * while its wm_handle_queue(wmq) is running.
	 */
   9493 	if (sc->sc_type == WM_T_82574)
   9494 		CSR_WRITE(sc, WMREG_IMS,
   9495 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9496 	else if (sc->sc_type == WM_T_82575)
   9497 		CSR_WRITE(sc, WMREG_EIMS,
   9498 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9499 	else
   9500 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9501 }
   9502 
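/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for a queue pair's Tx/Rx MSI-X vector.
 */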
   9503 static int
   9504 wm_txrxintr_msix(void *arg)
   9505 {
   9506 	struct wm_queue *wmq = arg;
   9507 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9508 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9509 	struct wm_softc *sc = txq->txq_sc;
   9510 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9511 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9512 	bool txmore;
   9513 	bool rxmore;
   9514 
   9515 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9516 
   9517 	DPRINTF(WM_DEBUG_TX,
   9518 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9519 
   9520 	wm_txrxintr_disable(wmq);
   9521 
   9522 	mutex_enter(txq->txq_lock);
   9523 
   9524 	if (txq->txq_stopping) {
   9525 		mutex_exit(txq->txq_lock);
   9526 		return 0;
   9527 	}
   9528 
   9529 	WM_Q_EVCNT_INCR(txq, txdw);
   9530 	txmore = wm_txeof(txq, txlimit);
    9531 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9532 	mutex_exit(txq->txq_lock);
   9533 
   9534 	DPRINTF(WM_DEBUG_RX,
   9535 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9536 	mutex_enter(rxq->rxq_lock);
   9537 
   9538 	if (rxq->rxq_stopping) {
   9539 		mutex_exit(rxq->rxq_lock);
   9540 		return 0;
   9541 	}
   9542 
   9543 	WM_Q_EVCNT_INCR(rxq, intr);
   9544 	rxmore = wm_rxeof(rxq, rxlimit);
   9545 	mutex_exit(rxq->rxq_lock);
   9546 
   9547 	wm_itrs_writereg(sc, wmq);
   9548 
   9549 	if (txmore || rxmore)
   9550 		softint_schedule(wmq->wmq_si);
   9551 	else
   9552 		wm_txrxintr_enable(wmq);
   9553 
   9554 	return 1;
   9555 }
   9556 
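/*
 * wm_handle_queue:
 *
 *	Softint handler for deferred Tx/Rx completion processing. It
 *	continues work that the interrupt handler could not finish within
 *	its process limits, reschedules itself while more work remains,
 *	and re-enables the queue interrupt once the rings are drained.
 */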
   9557 static void
   9558 wm_handle_queue(void *arg)
   9559 {
   9560 	struct wm_queue *wmq = arg;
   9561 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9562 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9563 	struct wm_softc *sc = txq->txq_sc;
   9564 	u_int txlimit = sc->sc_tx_process_limit;
   9565 	u_int rxlimit = sc->sc_rx_process_limit;
   9566 	bool txmore;
   9567 	bool rxmore;
   9568 
   9569 	mutex_enter(txq->txq_lock);
   9570 	if (txq->txq_stopping) {
   9571 		mutex_exit(txq->txq_lock);
   9572 		return;
   9573 	}
   9574 	txmore = wm_txeof(txq, txlimit);
   9575 	wm_deferred_start_locked(txq);
   9576 	mutex_exit(txq->txq_lock);
   9577 
   9578 	mutex_enter(rxq->rxq_lock);
   9579 	if (rxq->rxq_stopping) {
   9580 		mutex_exit(rxq->rxq_lock);
   9581 		return;
   9582 	}
   9583 	WM_Q_EVCNT_INCR(rxq, defer);
   9584 	rxmore = wm_rxeof(rxq, rxlimit);
   9585 	mutex_exit(rxq->rxq_lock);
   9586 
   9587 	if (txmore || rxmore)
   9588 		softint_schedule(wmq->wmq_si);
   9589 	else
   9590 		wm_txrxintr_enable(wmq);
   9591 }
   9592 
   9593 /*
   9594  * wm_linkintr_msix:
   9595  *
   9596  *	Interrupt service routine for link status change for MSI-X.
   9597  */
   9598 static int
   9599 wm_linkintr_msix(void *arg)
   9600 {
   9601 	struct wm_softc *sc = arg;
   9602 	uint32_t reg;
    9603 	bool has_rxo = false;
   9604 
   9605 	DPRINTF(WM_DEBUG_LINK,
   9606 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9607 
   9608 	reg = CSR_READ(sc, WMREG_ICR);
   9609 	WM_CORE_LOCK(sc);
   9610 	if (sc->sc_core_stopping)
   9611 		goto out;
   9612 
   9613 	if ((reg & ICR_LSC) != 0) {
   9614 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9615 		wm_linkintr(sc, ICR_LSC);
   9616 	}
   9617 
   9618 	/*
   9619 	 * XXX 82574 MSI-X mode workaround
   9620 	 *
    9621 	 * In 82574 MSI-X mode, a receive overrun (RXO) is signalled on the
    9622 	 * ICR_OTHER MSI-X vector and triggers neither the ICR_RXQ(0) nor
    9623 	 * the ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9624 	 * interrupts by writing WMREG_ICS to process the received packets.
   9625 	 */
   9626 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9627 #if defined(WM_DEBUG)
   9628 		log(LOG_WARNING, "%s: Receive overrun\n",
   9629 		    device_xname(sc->sc_dev));
   9630 #endif /* defined(WM_DEBUG) */
   9631 
   9632 		has_rxo = true;
   9633 		/*
    9634 		 * The RXO interrupt fires at a very high rate when receive
    9635 		 * traffic is heavy, so use polling mode for ICR_OTHER just as
    9636 		 * for the Tx/Rx interrupts. ICR_OTHER will be re-enabled at
    9637 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    9638 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9639 		 */
   9640 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9641 
   9642 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9643 	}
    9644 
   9647 out:
   9648 	WM_CORE_UNLOCK(sc);
   9649 
   9650 	if (sc->sc_type == WM_T_82574) {
   9651 		if (!has_rxo)
   9652 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9653 		else
   9654 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9655 	} else if (sc->sc_type == WM_T_82575)
   9656 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9657 	else
   9658 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9659 
   9660 	return 1;
   9661 }
   9662 
   9663 /*
   9664  * Media related.
   9665  * GMII, SGMII, TBI (and SERDES)
   9666  */
   9667 
   9668 /* Common */
   9669 
   9670 /*
   9671  * wm_tbi_serdes_set_linkled:
   9672  *
   9673  *	Update the link LED on TBI and SERDES devices.
   9674  */
   9675 static void
   9676 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9677 {
   9678 
   9679 	if (sc->sc_tbi_linkup)
   9680 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9681 	else
   9682 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9683 
    9684 	/* 82540 and newer devices are active-low */
   9685 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
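	/*
	 * On active-low parts the XOR above inverts the pin sense: with
	 * sc_tbi_linkup true, CTRL_SWDPIN(0) is first set and then cleared
	 * by the XOR, so the pin is driven low and the LED lights.
	 */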
   9686 
   9687 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9688 }
   9689 
   9690 /* GMII related */
   9691 
   9692 /*
   9693  * wm_gmii_reset:
   9694  *
   9695  *	Reset the PHY.
   9696  */
   9697 static void
   9698 wm_gmii_reset(struct wm_softc *sc)
   9699 {
   9700 	uint32_t reg;
   9701 	int rv;
   9702 
   9703 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9704 		device_xname(sc->sc_dev), __func__));
   9705 
   9706 	rv = sc->phy.acquire(sc);
   9707 	if (rv != 0) {
   9708 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9709 		    __func__);
   9710 		return;
   9711 	}
   9712 
   9713 	switch (sc->sc_type) {
   9714 	case WM_T_82542_2_0:
   9715 	case WM_T_82542_2_1:
   9716 		/* null */
   9717 		break;
   9718 	case WM_T_82543:
   9719 		/*
    9720 		 * With the 82543, we need to force the MAC's speed and
    9721 		 * duplex settings to match the PHY's configuration. In
    9722 		 * addition, we need to toggle the hardware reset pin on the
    9723 		 * PHY to take it out of reset.
   9724 		 */
   9725 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9726 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9727 
   9728 		/* The PHY reset pin is active-low. */
   9729 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9730 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9731 		    CTRL_EXT_SWDPIN(4));
   9732 		reg |= CTRL_EXT_SWDPIO(4);
   9733 
   9734 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9735 		CSR_WRITE_FLUSH(sc);
   9736 		delay(10*1000);
   9737 
   9738 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9739 		CSR_WRITE_FLUSH(sc);
   9740 		delay(150);
   9741 #if 0
   9742 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9743 #endif
   9744 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9745 		break;
   9746 	case WM_T_82544:	/* reset 10000us */
   9747 	case WM_T_82540:
   9748 	case WM_T_82545:
   9749 	case WM_T_82545_3:
   9750 	case WM_T_82546:
   9751 	case WM_T_82546_3:
   9752 	case WM_T_82541:
   9753 	case WM_T_82541_2:
   9754 	case WM_T_82547:
   9755 	case WM_T_82547_2:
   9756 	case WM_T_82571:	/* reset 100us */
   9757 	case WM_T_82572:
   9758 	case WM_T_82573:
   9759 	case WM_T_82574:
   9760 	case WM_T_82575:
   9761 	case WM_T_82576:
   9762 	case WM_T_82580:
   9763 	case WM_T_I350:
   9764 	case WM_T_I354:
   9765 	case WM_T_I210:
   9766 	case WM_T_I211:
   9767 	case WM_T_82583:
   9768 	case WM_T_80003:
   9769 		/* generic reset */
   9770 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9771 		CSR_WRITE_FLUSH(sc);
   9772 		delay(20000);
   9773 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9774 		CSR_WRITE_FLUSH(sc);
   9775 		delay(20000);
   9776 
   9777 		if ((sc->sc_type == WM_T_82541)
   9778 		    || (sc->sc_type == WM_T_82541_2)
   9779 		    || (sc->sc_type == WM_T_82547)
   9780 		    || (sc->sc_type == WM_T_82547_2)) {
    9781 			/* Workarounds for IGP are done in igp_reset() */
   9782 			/* XXX add code to set LED after phy reset */
   9783 		}
   9784 		break;
   9785 	case WM_T_ICH8:
   9786 	case WM_T_ICH9:
   9787 	case WM_T_ICH10:
   9788 	case WM_T_PCH:
   9789 	case WM_T_PCH2:
   9790 	case WM_T_PCH_LPT:
   9791 	case WM_T_PCH_SPT:
   9792 	case WM_T_PCH_CNP:
   9793 		/* generic reset */
   9794 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9795 		CSR_WRITE_FLUSH(sc);
   9796 		delay(100);
   9797 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9798 		CSR_WRITE_FLUSH(sc);
   9799 		delay(150);
   9800 		break;
   9801 	default:
   9802 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9803 		    __func__);
   9804 		break;
   9805 	}
   9806 
   9807 	sc->phy.release(sc);
   9808 
   9809 	/* get_cfg_done */
   9810 	wm_get_cfg_done(sc);
   9811 
   9812 	/* extra setup */
   9813 	switch (sc->sc_type) {
   9814 	case WM_T_82542_2_0:
   9815 	case WM_T_82542_2_1:
   9816 	case WM_T_82543:
   9817 	case WM_T_82544:
   9818 	case WM_T_82540:
   9819 	case WM_T_82545:
   9820 	case WM_T_82545_3:
   9821 	case WM_T_82546:
   9822 	case WM_T_82546_3:
   9823 	case WM_T_82541_2:
   9824 	case WM_T_82547_2:
   9825 	case WM_T_82571:
   9826 	case WM_T_82572:
   9827 	case WM_T_82573:
   9828 	case WM_T_82574:
   9829 	case WM_T_82583:
   9830 	case WM_T_82575:
   9831 	case WM_T_82576:
   9832 	case WM_T_82580:
   9833 	case WM_T_I350:
   9834 	case WM_T_I354:
   9835 	case WM_T_I210:
   9836 	case WM_T_I211:
   9837 	case WM_T_80003:
   9838 		/* null */
   9839 		break;
   9840 	case WM_T_82541:
   9841 	case WM_T_82547:
    9842 		/* XXX Actively configure the LED after PHY reset */
   9843 		break;
   9844 	case WM_T_ICH8:
   9845 	case WM_T_ICH9:
   9846 	case WM_T_ICH10:
   9847 	case WM_T_PCH:
   9848 	case WM_T_PCH2:
   9849 	case WM_T_PCH_LPT:
   9850 	case WM_T_PCH_SPT:
   9851 	case WM_T_PCH_CNP:
   9852 		wm_phy_post_reset(sc);
   9853 		break;
   9854 	default:
   9855 		panic("%s: unknown type\n", __func__);
   9856 		break;
   9857 	}
   9858 }
   9859 
    9860 /*
    9861  * Set up sc_phytype and mii_{read|write}reg.
    9862  *
    9863  *  To identify the PHY type, the correct read/write functions must be
    9864  * selected, and to select them the PCI ID or MAC type must be used,
    9865  * because the PHY registers cannot be accessed yet.
    9866  *
    9867  *  On the first call of this function, the PHY ID is not known yet, so
    9868  * check the PCI ID or MAC type. The list of PCI IDs may not be
    9869  * complete, so the result might be incorrect.
    9870  *
    9871  *  On the second call, the PHY OUI and model are used to identify the
    9872  * PHY type. The result might still be imperfect because of missing
    9873  * table entries, but it is more reliable than the first call.
    9874  *
    9875  *  If the newly detected result differs from the previous assumption,
    9876  * a diagnostic message is printed.
    9877  */
   9878 static void
   9879 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9880     uint16_t phy_model)
   9881 {
   9882 	device_t dev = sc->sc_dev;
   9883 	struct mii_data *mii = &sc->sc_mii;
   9884 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9885 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9886 	mii_readreg_t new_readreg;
   9887 	mii_writereg_t new_writereg;
   9888 
   9889 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9890 		device_xname(sc->sc_dev), __func__));
   9891 
   9892 	if (mii->mii_readreg == NULL) {
   9893 		/*
   9894 		 *  This is the first call of this function. For ICH and PCH
   9895 		 * variants, it's difficult to determine the PHY access method
   9896 		 * by sc_type, so use the PCI product ID for some devices.
   9897 		 */
   9898 
   9899 		switch (sc->sc_pcidevid) {
   9900 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9901 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9902 			/* 82577 */
   9903 			new_phytype = WMPHY_82577;
   9904 			break;
   9905 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9906 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9907 			/* 82578 */
   9908 			new_phytype = WMPHY_82578;
   9909 			break;
   9910 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9911 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9912 			/* 82579 */
   9913 			new_phytype = WMPHY_82579;
   9914 			break;
   9915 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9916 		case PCI_PRODUCT_INTEL_82801I_BM:
   9917 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9918 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9919 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9920 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9921 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9922 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9923 			/* ICH8, 9, 10 with 82567 */
   9924 			new_phytype = WMPHY_BM;
   9925 			break;
   9926 		default:
   9927 			break;
   9928 		}
   9929 	} else {
   9930 		/* It's not the first call. Use PHY OUI and model */
   9931 		switch (phy_oui) {
   9932 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9933 			switch (phy_model) {
   9934 			case 0x0004: /* XXX */
   9935 				new_phytype = WMPHY_82578;
   9936 				break;
   9937 			default:
   9938 				break;
   9939 			}
   9940 			break;
   9941 		case MII_OUI_xxMARVELL:
   9942 			switch (phy_model) {
   9943 			case MII_MODEL_xxMARVELL_I210:
   9944 				new_phytype = WMPHY_I210;
   9945 				break;
   9946 			case MII_MODEL_xxMARVELL_E1011:
   9947 			case MII_MODEL_xxMARVELL_E1000_3:
   9948 			case MII_MODEL_xxMARVELL_E1000_5:
   9949 			case MII_MODEL_xxMARVELL_E1112:
   9950 				new_phytype = WMPHY_M88;
   9951 				break;
   9952 			case MII_MODEL_xxMARVELL_E1149:
   9953 				new_phytype = WMPHY_BM;
   9954 				break;
   9955 			case MII_MODEL_xxMARVELL_E1111:
   9956 			case MII_MODEL_xxMARVELL_I347:
   9957 			case MII_MODEL_xxMARVELL_E1512:
   9958 			case MII_MODEL_xxMARVELL_E1340M:
   9959 			case MII_MODEL_xxMARVELL_E1543:
   9960 				new_phytype = WMPHY_M88;
   9961 				break;
   9962 			case MII_MODEL_xxMARVELL_I82563:
   9963 				new_phytype = WMPHY_GG82563;
   9964 				break;
   9965 			default:
   9966 				break;
   9967 			}
   9968 			break;
   9969 		case MII_OUI_INTEL:
   9970 			switch (phy_model) {
   9971 			case MII_MODEL_INTEL_I82577:
   9972 				new_phytype = WMPHY_82577;
   9973 				break;
   9974 			case MII_MODEL_INTEL_I82579:
   9975 				new_phytype = WMPHY_82579;
   9976 				break;
   9977 			case MII_MODEL_INTEL_I217:
   9978 				new_phytype = WMPHY_I217;
   9979 				break;
   9980 			case MII_MODEL_INTEL_I82580:
   9981 			case MII_MODEL_INTEL_I350:
   9982 				new_phytype = WMPHY_82580;
   9983 				break;
   9984 			default:
   9985 				break;
   9986 			}
   9987 			break;
   9988 		case MII_OUI_yyINTEL:
   9989 			switch (phy_model) {
   9990 			case MII_MODEL_yyINTEL_I82562G:
   9991 			case MII_MODEL_yyINTEL_I82562EM:
   9992 			case MII_MODEL_yyINTEL_I82562ET:
   9993 				new_phytype = WMPHY_IFE;
   9994 				break;
   9995 			case MII_MODEL_yyINTEL_IGP01E1000:
   9996 				new_phytype = WMPHY_IGP;
   9997 				break;
   9998 			case MII_MODEL_yyINTEL_I82566:
   9999 				new_phytype = WMPHY_IGP_3;
   10000 				break;
   10001 			default:
   10002 				break;
   10003 			}
   10004 			break;
   10005 		default:
   10006 			break;
   10007 		}
   10008 		if (new_phytype == WMPHY_UNKNOWN)
   10009 			aprint_verbose_dev(dev,
   10010 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10011 			    __func__, phy_oui, phy_model);
   10012 
   10013 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10014 		    && (sc->sc_phytype != new_phytype)) {
    10015 			aprint_error_dev(dev, "Previously assumed PHY type (%u)"
    10016 			    " was incorrect. PHY type from PHY ID = %u\n",
   10017 			    sc->sc_phytype, new_phytype);
   10018 		}
   10019 	}
   10020 
   10021 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10022 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10023 		/* SGMII */
   10024 		new_readreg = wm_sgmii_readreg;
   10025 		new_writereg = wm_sgmii_writereg;
   10026 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10027 		/* BM2 (phyaddr == 1) */
   10028 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10029 		    && (new_phytype != WMPHY_BM)
   10030 		    && (new_phytype != WMPHY_UNKNOWN))
   10031 			doubt_phytype = new_phytype;
   10032 		new_phytype = WMPHY_BM;
   10033 		new_readreg = wm_gmii_bm_readreg;
   10034 		new_writereg = wm_gmii_bm_writereg;
   10035 	} else if (sc->sc_type >= WM_T_PCH) {
   10036 		/* All PCH* use _hv_ */
   10037 		new_readreg = wm_gmii_hv_readreg;
   10038 		new_writereg = wm_gmii_hv_writereg;
   10039 	} else if (sc->sc_type >= WM_T_ICH8) {
   10040 		/* non-82567 ICH8, 9 and 10 */
   10041 		new_readreg = wm_gmii_i82544_readreg;
   10042 		new_writereg = wm_gmii_i82544_writereg;
   10043 	} else if (sc->sc_type >= WM_T_80003) {
   10044 		/* 80003 */
   10045 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10046 		    && (new_phytype != WMPHY_GG82563)
   10047 		    && (new_phytype != WMPHY_UNKNOWN))
   10048 			doubt_phytype = new_phytype;
   10049 		new_phytype = WMPHY_GG82563;
   10050 		new_readreg = wm_gmii_i80003_readreg;
   10051 		new_writereg = wm_gmii_i80003_writereg;
   10052 	} else if (sc->sc_type >= WM_T_I210) {
   10053 		/* I210 and I211 */
   10054 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10055 		    && (new_phytype != WMPHY_I210)
   10056 		    && (new_phytype != WMPHY_UNKNOWN))
   10057 			doubt_phytype = new_phytype;
   10058 		new_phytype = WMPHY_I210;
   10059 		new_readreg = wm_gmii_gs40g_readreg;
   10060 		new_writereg = wm_gmii_gs40g_writereg;
   10061 	} else if (sc->sc_type >= WM_T_82580) {
   10062 		/* 82580, I350 and I354 */
   10063 		new_readreg = wm_gmii_82580_readreg;
   10064 		new_writereg = wm_gmii_82580_writereg;
   10065 	} else if (sc->sc_type >= WM_T_82544) {
    10066 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10067 		new_readreg = wm_gmii_i82544_readreg;
   10068 		new_writereg = wm_gmii_i82544_writereg;
   10069 	} else {
   10070 		new_readreg = wm_gmii_i82543_readreg;
   10071 		new_writereg = wm_gmii_i82543_writereg;
   10072 	}
   10073 
   10074 	if (new_phytype == WMPHY_BM) {
   10075 		/* All BM use _bm_ */
   10076 		new_readreg = wm_gmii_bm_readreg;
   10077 		new_writereg = wm_gmii_bm_writereg;
   10078 	}
   10079 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10080 		/* All PCH* use _hv_ */
   10081 		new_readreg = wm_gmii_hv_readreg;
   10082 		new_writereg = wm_gmii_hv_writereg;
   10083 	}
   10084 
   10085 	/* Diag output */
   10086 	if (doubt_phytype != WMPHY_UNKNOWN)
   10087 		aprint_error_dev(dev, "Assumed new PHY type was "
   10088 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10089 		    new_phytype);
   10090 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    10091 	    && (sc->sc_phytype != new_phytype))
    10092 		aprint_error_dev(dev, "Previously assumed PHY type (%u)"
    10093 		    " was incorrect. New PHY type = %u\n",
   10094 		    sc->sc_phytype, new_phytype);
   10095 
   10096 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10097 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10098 
   10099 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10100 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10101 		    "function was incorrect.\n");
   10102 
   10103 	/* Update now */
   10104 	sc->sc_phytype = new_phytype;
   10105 	mii->mii_readreg = new_readreg;
   10106 	mii->mii_writereg = new_writereg;
   10107 	if (new_readreg == wm_gmii_hv_readreg) {
   10108 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10109 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10110 	} else if (new_readreg == wm_sgmii_readreg) {
   10111 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10112 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10113 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10114 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10115 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10116 	}
   10117 }
   10118 
   10119 /*
   10120  * wm_get_phy_id_82575:
   10121  *
    10122  * Return the PHY ID, or -1 on failure.
   10123  */
   10124 static int
   10125 wm_get_phy_id_82575(struct wm_softc *sc)
   10126 {
   10127 	uint32_t reg;
   10128 	int phyid = -1;
   10129 
   10130 	/* XXX */
   10131 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10132 		return -1;
   10133 
   10134 	if (wm_sgmii_uses_mdio(sc)) {
   10135 		switch (sc->sc_type) {
   10136 		case WM_T_82575:
   10137 		case WM_T_82576:
   10138 			reg = CSR_READ(sc, WMREG_MDIC);
   10139 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10140 			break;
   10141 		case WM_T_82580:
   10142 		case WM_T_I350:
   10143 		case WM_T_I354:
   10144 		case WM_T_I210:
   10145 		case WM_T_I211:
   10146 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10147 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10148 			break;
   10149 		default:
   10150 			return -1;
   10151 		}
   10152 	}
   10153 
   10154 	return phyid;
   10155 }
   10156 
   10157 
   10158 /*
   10159  * wm_gmii_mediainit:
   10160  *
   10161  *	Initialize media for use on 1000BASE-T devices.
   10162  */
   10163 static void
   10164 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10165 {
   10166 	device_t dev = sc->sc_dev;
   10167 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10168 	struct mii_data *mii = &sc->sc_mii;
   10169 	uint32_t reg;
   10170 
   10171 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10172 		device_xname(sc->sc_dev), __func__));
   10173 
   10174 	/* We have GMII. */
   10175 	sc->sc_flags |= WM_F_HAS_MII;
   10176 
   10177 	if (sc->sc_type == WM_T_80003)
   10178 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10179 	else
   10180 		sc->sc_tipg = TIPG_1000T_DFLT;
   10181 
   10182 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10183 	if ((sc->sc_type == WM_T_82580)
   10184 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10185 	    || (sc->sc_type == WM_T_I211)) {
   10186 		reg = CSR_READ(sc, WMREG_PHPM);
   10187 		reg &= ~PHPM_GO_LINK_D;
   10188 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10189 	}
   10190 
   10191 	/*
   10192 	 * Let the chip set speed/duplex on its own based on
   10193 	 * signals from the PHY.
   10194 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10195 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10196 	 */
   10197 	sc->sc_ctrl |= CTRL_SLU;
   10198 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10199 
   10200 	/* Initialize our media structures and probe the GMII. */
   10201 	mii->mii_ifp = ifp;
   10202 
   10203 	mii->mii_statchg = wm_gmii_statchg;
   10204 
   10205 	/* get PHY control from SMBus to PCIe */
   10206 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10207 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10208 	    || (sc->sc_type == WM_T_PCH_CNP))
   10209 		wm_init_phy_workarounds_pchlan(sc);
   10210 
   10211 	wm_gmii_reset(sc);
   10212 
   10213 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10214 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10215 	    wm_gmii_mediastatus);
   10216 
   10217 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10218 	    || (sc->sc_type == WM_T_82580)
   10219 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10220 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10221 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10222 			/* Attach only one port */
   10223 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10224 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10225 		} else {
   10226 			int i, id;
   10227 			uint32_t ctrl_ext;
   10228 
   10229 			id = wm_get_phy_id_82575(sc);
   10230 			if (id != -1) {
   10231 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10232 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10233 			}
   10234 			if ((id == -1)
   10235 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10236 				/* Power on the SGMII PHY if it is disabled */
   10237 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10238 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10239 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10240 				CSR_WRITE_FLUSH(sc);
   10241 				delay(300*1000); /* XXX too long */
   10242 
    10243 				/* Try PHY addresses 1 through 7 */
   10244 				for (i = 1; i < 8; i++)
   10245 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10246 					    0xffffffff, i, MII_OFFSET_ANY,
   10247 					    MIIF_DOPAUSE);
   10248 
    10249 				/* Restore the previous SFP cage power state */
   10250 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10251 			}
   10252 		}
   10253 	} else
   10254 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10255 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10256 
   10257 	/*
    10258 	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
    10259 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   10260 	 */
   10261 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10262 		|| (sc->sc_type == WM_T_PCH_SPT)
   10263 		|| (sc->sc_type == WM_T_PCH_CNP))
   10264 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10265 		wm_set_mdio_slow_mode_hv(sc);
   10266 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10267 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10268 	}
   10269 
   10270 	/*
   10271 	 * (For ICH8 variants)
   10272 	 * If PHY detection failed, use BM's r/w function and retry.
   10273 	 */
   10274 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10275 		/* if failed, retry with *_bm_* */
   10276 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10277 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10278 		    sc->sc_phytype);
   10279 		sc->sc_phytype = WMPHY_BM;
   10280 		mii->mii_readreg = wm_gmii_bm_readreg;
   10281 		mii->mii_writereg = wm_gmii_bm_writereg;
   10282 
   10283 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10284 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10285 	}
   10286 
   10287 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10288 		/* No PHY was found */
   10289 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10290 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10291 		sc->sc_phytype = WMPHY_NONE;
   10292 	} else {
   10293 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10294 
   10295 		/*
    10296 		 * PHY found! Check the PHY type again with the second call
    10297 		 * of wm_gmii_setup_phytype().
   10298 		 */
   10299 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10300 		    child->mii_mpd_model);
   10301 
   10302 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10303 	}
   10304 }
   10305 
   10306 /*
   10307  * wm_gmii_mediachange:	[ifmedia interface function]
   10308  *
   10309  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10310  */
   10311 static int
   10312 wm_gmii_mediachange(struct ifnet *ifp)
   10313 {
   10314 	struct wm_softc *sc = ifp->if_softc;
   10315 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10316 	int rc;
   10317 
   10318 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10319 		device_xname(sc->sc_dev), __func__));
   10320 	if ((ifp->if_flags & IFF_UP) == 0)
   10321 		return 0;
   10322 
   10323 	/* Disable D0 LPLU. */
   10324 	wm_lplu_d0_disable(sc);
   10325 
   10326 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10327 	sc->sc_ctrl |= CTRL_SLU;
   10328 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10329 	    || (sc->sc_type > WM_T_82543)) {
   10330 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10331 	} else {
   10332 		sc->sc_ctrl &= ~CTRL_ASDE;
   10333 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10334 		if (ife->ifm_media & IFM_FDX)
   10335 			sc->sc_ctrl |= CTRL_FD;
   10336 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10337 		case IFM_10_T:
   10338 			sc->sc_ctrl |= CTRL_SPEED_10;
   10339 			break;
   10340 		case IFM_100_TX:
   10341 			sc->sc_ctrl |= CTRL_SPEED_100;
   10342 			break;
   10343 		case IFM_1000_T:
   10344 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10345 			break;
   10346 		case IFM_NONE:
   10347 			/* There is no specific setting for IFM_NONE */
   10348 			break;
   10349 		default:
   10350 			panic("wm_gmii_mediachange: bad media 0x%x",
   10351 			    ife->ifm_media);
   10352 		}
   10353 	}
   10354 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10355 	CSR_WRITE_FLUSH(sc);
   10356 	if (sc->sc_type <= WM_T_82543)
   10357 		wm_gmii_reset(sc);
   10358 
   10359 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10360 		return 0;
   10361 	return rc;
   10362 }
   10363 
   10364 /*
   10365  * wm_gmii_mediastatus:	[ifmedia interface function]
   10366  *
   10367  *	Get the current interface media status on a 1000BASE-T device.
   10368  */
   10369 static void
   10370 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10371 {
   10372 	struct wm_softc *sc = ifp->if_softc;
   10373 
   10374 	ether_mediastatus(ifp, ifmr);
   10375 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10376 	    | sc->sc_flowflags;
   10377 }
   10378 
   10379 #define	MDI_IO		CTRL_SWDPIN(2)
   10380 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10381 #define	MDI_CLK		CTRL_SWDPIN(3)
   10382 
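/*
 * On the i82543 the MII management interface is bit-banged through the
 * software-definable pins above: for each bit, MDI_IO is driven (or
 * sampled) and MDI_CLK is pulsed high and then low, with roughly 10us
 * of settling time around each edge.
 */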
   10383 static void
   10384 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10385 {
   10386 	uint32_t i, v;
   10387 
   10388 	v = CSR_READ(sc, WMREG_CTRL);
   10389 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10390 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10391 
   10392 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10393 		if (data & i)
   10394 			v |= MDI_IO;
   10395 		else
   10396 			v &= ~MDI_IO;
   10397 		CSR_WRITE(sc, WMREG_CTRL, v);
   10398 		CSR_WRITE_FLUSH(sc);
   10399 		delay(10);
   10400 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10401 		CSR_WRITE_FLUSH(sc);
   10402 		delay(10);
   10403 		CSR_WRITE(sc, WMREG_CTRL, v);
   10404 		CSR_WRITE_FLUSH(sc);
   10405 		delay(10);
   10406 	}
   10407 }
   10408 
   10409 static uint16_t
   10410 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10411 {
   10412 	uint32_t v, i;
   10413 	uint16_t data = 0;
   10414 
   10415 	v = CSR_READ(sc, WMREG_CTRL);
   10416 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10417 	v |= CTRL_SWDPIO(3);
   10418 
   10419 	CSR_WRITE(sc, WMREG_CTRL, v);
   10420 	CSR_WRITE_FLUSH(sc);
   10421 	delay(10);
   10422 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10423 	CSR_WRITE_FLUSH(sc);
   10424 	delay(10);
   10425 	CSR_WRITE(sc, WMREG_CTRL, v);
   10426 	CSR_WRITE_FLUSH(sc);
   10427 	delay(10);
   10428 
   10429 	for (i = 0; i < 16; i++) {
   10430 		data <<= 1;
   10431 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10432 		CSR_WRITE_FLUSH(sc);
   10433 		delay(10);
   10434 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10435 			data |= 1;
   10436 		CSR_WRITE(sc, WMREG_CTRL, v);
   10437 		CSR_WRITE_FLUSH(sc);
   10438 		delay(10);
   10439 	}
   10440 
   10441 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10442 	CSR_WRITE_FLUSH(sc);
   10443 	delay(10);
   10444 	CSR_WRITE(sc, WMREG_CTRL, v);
   10445 	CSR_WRITE_FLUSH(sc);
   10446 	delay(10);
   10447 
   10448 	return data;
   10449 }
   10450 
   10451 #undef MDI_IO
   10452 #undef MDI_DIR
   10453 #undef MDI_CLK
   10454 
   10455 /*
   10456  * wm_gmii_i82543_readreg:	[mii interface function]
   10457  *
   10458  *	Read a PHY register on the GMII (i82543 version).
   10459  */
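/*
 * The calls below build a standard IEEE 802.3 clause 22 read frame:
 * a 32-bit preamble of all ones, then 14 bits consisting of the start
 * code (01), the read opcode (10), the 5-bit PHY address and the 5-bit
 * register address, after which the PHY drives the 16 data bits that
 * wm_i82543_mii_recvbits() samples.
 */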
   10460 static int
   10461 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10462 {
   10463 	struct wm_softc *sc = device_private(dev);
   10464 
   10465 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10466 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10467 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10468 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10469 
   10470 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10471 		device_xname(dev), phy, reg, *val));
   10472 
   10473 	return 0;
   10474 }
   10475 
   10476 /*
   10477  * wm_gmii_i82543_writereg:	[mii interface function]
   10478  *
   10479  *	Write a PHY register on the GMII (i82543 version).
   10480  */
   10481 static int
   10482 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10483 {
   10484 	struct wm_softc *sc = device_private(dev);
   10485 
   10486 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10487 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10488 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10489 	    (MII_COMMAND_START << 30), 32);
   10490 
   10491 	return 0;
   10492 }
   10493 
   10494 /*
   10495  * wm_gmii_mdic_readreg:	[mii interface function]
   10496  *
   10497  *	Read a PHY register on the GMII.
   10498  */
   10499 static int
   10500 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10501 {
   10502 	struct wm_softc *sc = device_private(dev);
   10503 	uint32_t mdic = 0;
   10504 	int i;
   10505 
   10506 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10507 	    && (reg > MII_ADDRMASK)) {
   10508 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10509 		    __func__, sc->sc_phytype, reg);
   10510 		reg &= MII_ADDRMASK;
   10511 	}
   10512 
   10513 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10514 	    MDIC_REGADD(reg));
   10515 
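	/*
	 * Poll for the READY bit: up to WM_GEN_POLL_TIMEOUT * 3
	 * iterations, 50us apart.
	 */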
   10516 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10517 		delay(50);
   10518 		mdic = CSR_READ(sc, WMREG_MDIC);
   10519 		if (mdic & MDIC_READY)
   10520 			break;
   10521 	}
   10522 
   10523 	if ((mdic & MDIC_READY) == 0) {
   10524 		DPRINTF(WM_DEBUG_GMII,
   10525 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10526 			device_xname(dev), phy, reg));
   10527 		return ETIMEDOUT;
   10528 	} else if (mdic & MDIC_E) {
   10529 		/* This is normal if no PHY is present. */
   10530 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10531 			device_xname(sc->sc_dev), phy, reg));
   10532 		return -1;
   10533 	} else
   10534 		*val = MDIC_DATA(mdic);
   10535 
   10536 	/*
   10537 	 * Allow some time after each MDIC transaction to avoid
   10538 	 * reading duplicate data in the next MDIC transaction.
   10539 	 */
   10540 	if (sc->sc_type == WM_T_PCH2)
   10541 		delay(100);
   10542 
   10543 	return 0;
   10544 }
   10545 
   10546 /*
   10547  * wm_gmii_mdic_writereg:	[mii interface function]
   10548  *
   10549  *	Write a PHY register on the GMII.
   10550  */
   10551 static int
   10552 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10553 {
   10554 	struct wm_softc *sc = device_private(dev);
   10555 	uint32_t mdic = 0;
   10556 	int i;
   10557 
   10558 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10559 	    && (reg > MII_ADDRMASK)) {
   10560 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10561 		    __func__, sc->sc_phytype, reg);
   10562 		reg &= MII_ADDRMASK;
   10563 	}
   10564 
   10565 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10566 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10567 
   10568 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10569 		delay(50);
   10570 		mdic = CSR_READ(sc, WMREG_MDIC);
   10571 		if (mdic & MDIC_READY)
   10572 			break;
   10573 	}
   10574 
   10575 	if ((mdic & MDIC_READY) == 0) {
   10576 		DPRINTF(WM_DEBUG_GMII,
   10577 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10578 			device_xname(dev), phy, reg));
   10579 		return ETIMEDOUT;
   10580 	} else if (mdic & MDIC_E) {
   10581 		DPRINTF(WM_DEBUG_GMII,
   10582 		    ("%s: MDIC write error: phy %d reg %d\n",
   10583 			device_xname(dev), phy, reg));
   10584 		return -1;
   10585 	}
   10586 
   10587 	/*
   10588 	 * Allow some time after each MDIC transaction to avoid
   10589 	 * reading duplicate data in the next MDIC transaction.
   10590 	 */
   10591 	if (sc->sc_type == WM_T_PCH2)
   10592 		delay(100);
   10593 
   10594 	return 0;
   10595 }
   10596 
   10597 /*
   10598  * wm_gmii_i82544_readreg:	[mii interface function]
   10599  *
   10600  *	Read a PHY register on the GMII.
   10601  */
   10602 static int
   10603 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10604 {
   10605 	struct wm_softc *sc = device_private(dev);
   10606 	int rv;
   10607 
   10608 	if (sc->phy.acquire(sc)) {
   10609 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10610 		return -1;
   10611 	}
   10612 
   10613 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10614 
   10615 	sc->phy.release(sc);
   10616 
   10617 	return rv;
   10618 }
   10619 
   10620 static int
   10621 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10622 {
   10623 	struct wm_softc *sc = device_private(dev);
   10624 
   10625 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10626 		switch (sc->sc_phytype) {
   10627 		case WMPHY_IGP:
   10628 		case WMPHY_IGP_2:
   10629 		case WMPHY_IGP_3:
   10630 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10631 			    reg);
   10632 			break;
   10633 		default:
   10634 #ifdef WM_DEBUG
   10635 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10636 			    __func__, sc->sc_phytype, reg);
   10637 #endif
   10638 			break;
   10639 		}
   10640 	}
   10641 
   10642 	wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10643 
   10644 	return 0;
   10645 }
   10646 
   10647 /*
   10648  * wm_gmii_i82544_writereg:	[mii interface function]
   10649  *
   10650  *	Write a PHY register on the GMII.
   10651  */
   10652 static int
   10653 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10654 {
   10655 	struct wm_softc *sc = device_private(dev);
   10656 	int rv;
   10657 
   10658 	if (sc->phy.acquire(sc)) {
   10659 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10660 		return -1;
   10661 	}
   10662 
   10663 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10664 	sc->phy.release(sc);
   10665 
   10666 	return rv;
   10667 }
   10668 
   10669 static int
   10670 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10671 {
   10672 	struct wm_softc *sc = device_private(dev);
   10673 
   10674 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10675 		switch (sc->sc_phytype) {
   10676 		case WMPHY_IGP:
   10677 		case WMPHY_IGP_2:
   10678 		case WMPHY_IGP_3:
   10679 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10680 			    reg);
   10681 			break;
   10682 		default:
   10683 #ifdef WM_DEBUG
   10684 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10685 			    __func__, sc->sc_phytype, reg);
   10686 #endif
   10687 			break;
   10688 		}
   10689 	}
   10690 
   10691 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10692 
   10693 	return 0;
   10694 }
   10695 
   10696 /*
   10697  * wm_gmii_i80003_readreg:	[mii interface function]
   10698  *
    10699  *	Read a PHY register on the Kumeran bus.
    10700  * This could be handled by the PHY layer if we didn't have to lock the
    10701  * resource ...
   10702  */
   10703 static int
   10704 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10705 {
   10706 	struct wm_softc *sc = device_private(dev);
   10707 	int page_select;
   10708 	uint16_t temp, temp2;
   10709 	int rv = 0;
   10710 
   10711 	if (phy != 1) /* only one PHY on kumeran bus */
   10712 		return -1;
   10713 
   10714 	if (sc->phy.acquire(sc)) {
   10715 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10716 		return -1;
   10717 	}
   10718 
   10719 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10720 		page_select = GG82563_PHY_PAGE_SELECT;
   10721 	else {
   10722 		/*
   10723 		 * Use Alternative Page Select register to access registers
   10724 		 * 30 and 31.
   10725 		 */
   10726 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10727 	}
   10728 	temp = reg >> GG82563_PAGE_SHIFT;
   10729 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10730 		goto out;
   10731 
   10732 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10733 		/*
    10734 		 * Wait an extra 200us to work around a bug with the ready
    10735 		 * bit in the MDIC register.
   10736 		 */
   10737 		delay(200);
   10738 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10739 		if (temp2 != temp) {
   10740 			device_printf(dev, "%s failed\n", __func__);
   10741 			rv = -1;
   10742 			goto out;
   10743 		}
   10744 		delay(200);
   10745 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10746 		delay(200);
   10747 	} else
   10748 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10749 
   10750 out:
   10751 	sc->phy.release(sc);
   10752 	return rv;
   10753 }
   10754 
   10755 /*
   10756  * wm_gmii_i80003_writereg:	[mii interface function]
   10757  *
    10758  *	Write a PHY register on the Kumeran bus.
    10759  * This could be handled by the PHY layer if we didn't have to lock the
    10760  * resource ...
   10761  */
   10762 static int
   10763 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10764 {
   10765 	struct wm_softc *sc = device_private(dev);
   10766 	int page_select, rv;
   10767 	uint16_t temp, temp2;
   10768 
   10769 	if (phy != 1) /* only one PHY on kumeran bus */
   10770 		return -1;
   10771 
   10772 	if (sc->phy.acquire(sc)) {
   10773 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10774 		return -1;
   10775 	}
   10776 
   10777 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10778 		page_select = GG82563_PHY_PAGE_SELECT;
   10779 	else {
   10780 		/*
   10781 		 * Use Alternative Page Select register to access registers
   10782 		 * 30 and 31.
   10783 		 */
   10784 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10785 	}
   10786 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10787 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10788 		goto out;
   10789 
   10790 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10791 		/*
    10792 		 * Wait an extra 200us to work around a bug with the ready
    10793 		 * bit in the MDIC register.
   10794 		 */
   10795 		delay(200);
   10796 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10797 		if (temp2 != temp) {
   10798 			device_printf(dev, "%s failed\n", __func__);
   10799 			rv = -1;
   10800 			goto out;
   10801 		}
   10802 		delay(200);
   10803 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10804 		delay(200);
   10805 	} else
   10806 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10807 
   10808 out:
   10809 	sc->phy.release(sc);
   10810 	return rv;
   10811 }
   10812 
   10813 /*
   10814  * wm_gmii_bm_readreg:	[mii interface function]
   10815  *
    10816  *	Read a PHY register on the BM PHY (e.g. 82567, 82574 and 82583).
    10817  * This could be handled by the PHY layer if we didn't have to lock the
    10818  * resource ...
   10819  */
   10820 static int
   10821 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10822 {
   10823 	struct wm_softc *sc = device_private(dev);
   10824 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10825 	int rv;
   10826 
   10827 	if (sc->phy.acquire(sc)) {
   10828 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10829 		return -1;
   10830 	}
   10831 
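	/*
	 * On BM PHYs (other than 82574/82583), the page-select,
	 * port-control and wakeup registers live at PHY address 1, so
	 * remap the address.
	 */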
   10832 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10833 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10834 		    || (reg == 31)) ? 1 : phy;
   10835 	/* Page 800 works differently than the rest so it has its own func */
   10836 	if (page == BM_WUC_PAGE) {
   10837 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10838 		goto release;
   10839 	}
   10840 
   10841 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10842 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10843 		    && (sc->sc_type != WM_T_82583))
   10844 			rv = wm_gmii_mdic_writereg(dev, phy,
   10845 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10846 		else
   10847 			rv = wm_gmii_mdic_writereg(dev, phy,
   10848 			    BME1000_PHY_PAGE_SELECT, page);
   10849 		if (rv != 0)
   10850 			goto release;
   10851 	}
   10852 
   10853 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10854 
   10855 release:
   10856 	sc->phy.release(sc);
   10857 	return rv;
   10858 }
   10859 
   10860 /*
   10861  * wm_gmii_bm_writereg:	[mii interface function]
   10862  *
    10863  *	Write a PHY register on the BM PHY (e.g. 82567, 82574 and 82583).
    10864  * This could be handled by the PHY layer if we didn't have to lock the
    10865  * resource ...
   10866  */
   10867 static int
   10868 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10869 {
   10870 	struct wm_softc *sc = device_private(dev);
   10871 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10872 	int rv;
   10873 
   10874 	if (sc->phy.acquire(sc)) {
   10875 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10876 		return -1;
   10877 	}
   10878 
   10879 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10880 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10881 		    || (reg == 31)) ? 1 : phy;
   10882 	/* Page 800 works differently than the rest so it has its own func */
   10883 	if (page == BM_WUC_PAGE) {
   10884 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10885 		goto release;
   10886 	}
   10887 
   10888 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10889 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10890 		    && (sc->sc_type != WM_T_82583))
   10891 			rv = wm_gmii_mdic_writereg(dev, phy,
   10892 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10893 		else
   10894 			rv = wm_gmii_mdic_writereg(dev, phy,
   10895 			    BME1000_PHY_PAGE_SELECT, page);
   10896 		if (rv != 0)
   10897 			goto release;
   10898 	}
   10899 
   10900 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10901 
   10902 release:
   10903 	sc->phy.release(sc);
   10904 	return rv;
   10905 }
   10906 
   10907 /*
   10908  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10909  *  @dev: pointer to the HW structure
   10910  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10911  *
   10912  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10913  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10914  */
   10915 static int
   10916 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10917 {
   10918 	uint16_t temp;
   10919 	int rv;
   10920 
   10921 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10922 		device_xname(dev), __func__));
   10923 
   10924 	if (!phy_regp)
   10925 		return -1;
   10926 
   10927 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10928 
   10929 	/* Select Port Control Registers page */
   10930 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10931 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10932 	if (rv != 0)
   10933 		return rv;
   10934 
   10935 	/* Read WUCE and save it */
   10936 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10937 	if (rv != 0)
   10938 		return rv;
   10939 
   10940 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10941 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10942 	 */
   10943 	temp = *phy_regp;
   10944 	temp |= BM_WUC_ENABLE_BIT;
   10945 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10946 
   10947 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10948 		return rv;
   10949 
   10950 	/* Select Host Wakeup Registers page - caller now able to write
   10951 	 * registers on the Wakeup registers page
   10952 	 */
   10953 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10954 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10955 }
   10956 
   10957 /*
   10958  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10959  *  @dev: pointer to the HW structure
   10960  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10961  *
   10962  *  Restore BM_WUC_ENABLE_REG to its original value.
   10963  *
   10964  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10965  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10966  *  caller.
   10967  */
   10968 static int
   10969 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10970 {
   10971 
   10972 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10973 		device_xname(dev), __func__));
   10974 
   10975 	if (!phy_regp)
   10976 		return -1;
   10977 
   10978 	/* Select Port Control Registers page */
   10979 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10980 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10981 
   10982 	/* Restore 769.17 to its original value */
   10983 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10984 
   10985 	return 0;
   10986 }
   10987 
   10988 /*
   10989  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    10990  *  @dev: pointer to the device
   10991  *  @offset: register offset to be read or written
   10992  *  @val: pointer to the data to read or write
   10993  *  @rd: determines if operation is read or write
   10994  *  @page_set: BM_WUC_PAGE already set and access enabled
   10995  *
   10996  *  Read the PHY register at offset and store the retrieved information in
   10997  *  data, or write data to PHY register at offset.  Note the procedure to
   10998  *  access the PHY wakeup registers is different than reading the other PHY
   10999  *  registers. It works as such:
   11000  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11001  *  2) Set page to 800 for host (801 if the access were for manageability)
   11002  *  3) Write the address using the address opcode (0x11)
   11003  *  4) Read or write the data using the data opcode (0x12)
   11004  *  5) Restore 769.17.2 to its original value
   11005  *
   11006  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11007  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11008  *
   11009  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11010  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11011  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11012  */
   11013 static int
    11014 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11015 	bool page_set)
   11016 {
   11017 	struct wm_softc *sc = device_private(dev);
   11018 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11019 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11020 	uint16_t wuce;
   11021 	int rv = 0;
   11022 
   11023 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11024 		device_xname(dev), __func__));
   11025 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11026 	if ((sc->sc_type == WM_T_PCH)
   11027 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11028 		device_printf(dev,
   11029 		    "Attempting to access page %d while gig enabled.\n", page);
   11030 	}
   11031 
   11032 	if (!page_set) {
   11033 		/* Enable access to PHY wakeup registers */
   11034 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11035 		if (rv != 0) {
   11036 			device_printf(dev,
   11037 			    "%s: Could not enable PHY wakeup reg access\n",
   11038 			    __func__);
   11039 			return rv;
   11040 		}
   11041 	}
   11042 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11043 		device_xname(sc->sc_dev), __func__, page, regnum));
   11044 
    11045 	/*
    11046 	 * Steps 3 and 4: access the PHY wakeup register (see the numbered
    11047 	 * procedure in the comment above this function).
    11048 	 */
   11049 
   11050 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11051 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11052 	if (rv != 0)
   11053 		return rv;
   11054 
   11055 	if (rd) {
   11056 		/* Read the Wakeup register page value using opcode 0x12 */
   11057 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11058 	} else {
   11059 		/* Write the Wakeup register page value using opcode 0x12 */
   11060 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11061 	}
   11062 	if (rv != 0)
   11063 		return rv;
   11064 
   11065 	if (!page_set)
   11066 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11067 
   11068 	return rv;
   11069 }
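/*
 * A minimal usage sketch (not from the original sources) of batching
 * wakeup-register accesses with page_set == true, so that the
 * enable/disable bracketing is done once by the caller; "offset" is a
 * placeholder for a real BM_PHY_REG_PAGE/BM_PHY_REG_NUM encoded offset:
 *
 *	uint16_t wuce, data;
 *	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) == 0) {
 *		(void)wm_access_phy_wakeup_reg_bm(dev, offset, &data,
 *		    true, true);
 *		(void)wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
 *	}
 */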
   11070 
   11071 /*
   11072  * wm_gmii_hv_readreg:	[mii interface function]
   11073  *
    11074  *	Read a PHY register on the HV (PCH and newer) PHY.
    11075  * This could be handled by the PHY layer if we didn't have to lock the
    11076  * resource ...
   11077  */
   11078 static int
   11079 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11080 {
   11081 	struct wm_softc *sc = device_private(dev);
   11082 	int rv;
   11083 
   11084 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11085 		device_xname(dev), __func__));
   11086 	if (sc->phy.acquire(sc)) {
   11087 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11088 		return -1;
   11089 	}
   11090 
   11091 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11092 	sc->phy.release(sc);
   11093 	return rv;
   11094 }
   11095 
   11096 static int
   11097 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11098 {
   11099 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11100 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11101 	int rv;
   11102 
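	/*
	 * Registers on pages at or above HV_INTC_FC_PAGE_START are always
	 * accessed at PHY address 1.
	 */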
   11103 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11104 
   11105 	/* Page 800 works differently than the rest so it has its own func */
   11106 	if (page == BM_WUC_PAGE)
   11107 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11108 
   11109 	/*
    11110 	 * Pages lower than 768 work differently from the rest and would
    11111 	 * need their own function
   11112 	 */
   11113 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11114 		printf("gmii_hv_readreg!!!\n");
   11115 		return -1;
   11116 	}
   11117 
   11118 	/*
   11119 	 * XXX I21[789] documents say that the SMBus Address register is at
   11120 	 * PHY address 01, Page 0 (not 768), Register 26.
   11121 	 */
   11122 	if (page == HV_INTC_FC_PAGE_START)
   11123 		page = 0;
   11124 
   11125 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11126 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11127 		    page << BME1000_PAGE_SHIFT);
   11128 		if (rv != 0)
   11129 			return rv;
   11130 	}
   11131 
   11132 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11133 }
   11134 
   11135 /*
   11136  * wm_gmii_hv_writereg:	[mii interface function]
   11137  *
    11138  *	Write a PHY register on the HV (PCH and newer) PHY.
    11139  * This could be handled by the PHY layer if we didn't have to lock the
    11140  * resource ...
   11141  */
   11142 static int
   11143 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11144 {
   11145 	struct wm_softc *sc = device_private(dev);
   11146 	int rv;
   11147 
   11148 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11149 		device_xname(dev), __func__));
   11150 
   11151 	if (sc->phy.acquire(sc)) {
   11152 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11153 		return -1;
   11154 	}
   11155 
   11156 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11157 	sc->phy.release(sc);
   11158 
   11159 	return rv;
   11160 }
   11161 
   11162 static int
   11163 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11164 {
   11165 	struct wm_softc *sc = device_private(dev);
   11166 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11167 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11168 	int rv;
   11169 
   11170 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11171 
   11172 	/* Page 800 works differently than the rest so it has its own func */
   11173 	if (page == BM_WUC_PAGE)
   11174 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11175 		    false);
   11176 
   11177 	/*
    11178 	 * Pages lower than 768 work differently from the rest and would
    11179 	 * need their own function
   11180 	 */
   11181 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11182 		printf("gmii_hv_writereg!!!\n");
   11183 		return -1;
   11184 	}
   11185 
   11186 	{
   11187 		/*
   11188 		 * XXX I21[789] documents say that the SMBus Address register
   11189 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11190 		 */
   11191 		if (page == HV_INTC_FC_PAGE_START)
   11192 			page = 0;
   11193 
   11194 		/*
   11195 		 * XXX Workaround MDIO accesses being disabled after entering
   11196 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11197 		 * register is set)
   11198 		 */
   11199 		if (sc->sc_phytype == WMPHY_82578) {
   11200 			struct mii_softc *child;
   11201 
   11202 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11203 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11204 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11205 			    && ((val & (1 << 11)) != 0)) {
   11206 				printf("XXX need workaround\n");
   11207 			}
   11208 		}
   11209 
   11210 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11211 			rv = wm_gmii_mdic_writereg(dev, 1,
   11212 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11213 			if (rv != 0)
   11214 				return rv;
   11215 		}
   11216 	}
   11217 
   11218 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11219 }
   11220 
   11221 /*
   11222  * wm_gmii_82580_readreg:	[mii interface function]
   11223  *
   11224  *	Read a PHY register on the 82580 and I350.
   11225  * This could be handled by the PHY layer if we didn't have to lock the
    11226  * resource ...
   11227  */
   11228 static int
   11229 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11230 {
   11231 	struct wm_softc *sc = device_private(dev);
   11232 	int rv;
   11233 
   11234 	if (sc->phy.acquire(sc) != 0) {
   11235 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11236 		return -1;
   11237 	}
   11238 
   11239 #ifdef DIAGNOSTIC
   11240 	if (reg > MII_ADDRMASK) {
   11241 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11242 		    __func__, sc->sc_phytype, reg);
   11243 		reg &= MII_ADDRMASK;
   11244 	}
   11245 #endif
   11246 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11247 
   11248 	sc->phy.release(sc);
   11249 	return rv;
   11250 }
   11251 
   11252 /*
   11253  * wm_gmii_82580_writereg:	[mii interface function]
   11254  *
   11255  *	Write a PHY register on the 82580 and I350.
   11256  * This could be handled by the PHY layer if we didn't have to lock the
   11257  * ressource ...
   11258  */
   11259 static int
   11260 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11261 {
   11262 	struct wm_softc *sc = device_private(dev);
   11263 	int rv;
   11264 
   11265 	if (sc->phy.acquire(sc) != 0) {
   11266 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11267 		return -1;
   11268 	}
   11269 
   11270 #ifdef DIAGNOSTIC
   11271 	if (reg > MII_ADDRMASK) {
   11272 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11273 		    __func__, sc->sc_phytype, reg);
   11274 		reg &= MII_ADDRMASK;
   11275 	}
   11276 #endif
   11277 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11278 
   11279 	sc->phy.release(sc);
   11280 	return rv;
   11281 }
   11282 
   11283 /*
   11284  * wm_gmii_gs40g_readreg:	[mii interface function]
   11285  *
    11286  *	Read a PHY register on the I210 and I211.
    11287  * This could be handled by the PHY layer if we didn't have to lock the
    11288  * resource ...
   11289  */
   11290 static int
   11291 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11292 {
   11293 	struct wm_softc *sc = device_private(dev);
   11294 	int page, offset;
   11295 	int rv;
   11296 
   11297 	/* Acquire semaphore */
   11298 	if (sc->phy.acquire(sc)) {
   11299 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11300 		return -1;
   11301 	}
   11302 
   11303 	/* Page select */
   11304 	page = reg >> GS40G_PAGE_SHIFT;
   11305 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11306 	if (rv != 0)
   11307 		goto release;
   11308 
   11309 	/* Read reg */
   11310 	offset = reg & GS40G_OFFSET_MASK;
   11311 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11312 
   11313 release:
   11314 	sc->phy.release(sc);
   11315 	return rv;
   11316 }
   11317 
   11318 /*
   11319  * wm_gmii_gs40g_writereg:	[mii interface function]
   11320  *
   11321  *	Write a PHY register on the I210 and I211.
   11322  * This could be handled by the PHY layer if we didn't have to lock the
    11323  * resource ...
   11324  */
   11325 static int
   11326 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11327 {
   11328 	struct wm_softc *sc = device_private(dev);
   11329 	uint16_t page;
   11330 	int offset, rv;
   11331 
   11332 	/* Acquire semaphore */
   11333 	if (sc->phy.acquire(sc)) {
   11334 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11335 		return -1;
   11336 	}
   11337 
   11338 	/* Page select */
   11339 	page = reg >> GS40G_PAGE_SHIFT;
   11340 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11341 	if (rv != 0)
   11342 		goto release;
   11343 
   11344 	/* Write reg */
   11345 	offset = reg & GS40G_OFFSET_MASK;
   11346 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11347 
   11348 release:
   11349 	/* Release semaphore */
   11350 	sc->phy.release(sc);
   11351 	return rv;
   11352 }
   11353 
   11354 /*
   11355  * wm_gmii_statchg:	[mii interface function]
   11356  *
   11357  *	Callback from MII layer when media changes.
   11358  */
   11359 static void
   11360 wm_gmii_statchg(struct ifnet *ifp)
   11361 {
   11362 	struct wm_softc *sc = ifp->if_softc;
   11363 	struct mii_data *mii = &sc->sc_mii;
   11364 
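          	/* Clear the bits which are re-resolved below */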
   11365 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11366 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11367 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11368 
   11369 	/*
   11370 	 * Get flow control negotiation result.
   11371 	 */
   11372 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11373 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11374 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11375 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11376 	}
   11377 
   11378 	if (sc->sc_flowflags & IFM_FLOW) {
   11379 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11380 			sc->sc_ctrl |= CTRL_TFCE;
   11381 			sc->sc_fcrtl |= FCRTL_XONE;
   11382 		}
   11383 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11384 			sc->sc_ctrl |= CTRL_RFCE;
   11385 	}
   11386 
   11387 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11388 		DPRINTF(WM_DEBUG_LINK,
   11389 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11390 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11391 	} else {
   11392 		DPRINTF(WM_DEBUG_LINK,
   11393 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11394 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11395 	}
   11396 
   11397 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11398 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11399 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11400 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11401 	if (sc->sc_type == WM_T_80003) {
   11402 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11403 		case IFM_1000_T:
   11404 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11405 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11406 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11407 			break;
   11408 		default:
   11409 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11410 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11411 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   11412 			break;
   11413 		}
   11414 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11415 	}
   11416 }
   11417 
   11418 /* kumeran related (80003, ICH* and PCH*) */
   11419 
   11420 /*
   11421  * wm_kmrn_readreg:
   11422  *
   11423  *	Read a kumeran register
   11424  */
   11425 static int
   11426 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11427 {
   11428 	int rv;
   11429 
   11430 	if (sc->sc_type == WM_T_80003)
   11431 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11432 	else
   11433 		rv = sc->phy.acquire(sc);
   11434 	if (rv != 0) {
   11435 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11436 		    __func__);
   11437 		return rv;
   11438 	}
   11439 
   11440 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11441 
   11442 	if (sc->sc_type == WM_T_80003)
   11443 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11444 	else
   11445 		sc->phy.release(sc);
   11446 
   11447 	return rv;
   11448 }
   11449 
   11450 static int
   11451 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11452 {
   11453 
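          	/* Post the register offset with REN set, wait 2us, then read the data */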
   11454 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11455 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11456 	    KUMCTRLSTA_REN);
   11457 	CSR_WRITE_FLUSH(sc);
   11458 	delay(2);
   11459 
   11460 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11461 
   11462 	return 0;
   11463 }
   11464 
   11465 /*
   11466  * wm_kmrn_writereg:
   11467  *
   11468  *	Write a kumeran register
   11469  */
   11470 static int
   11471 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11472 {
   11473 	int rv;
   11474 
   11475 	if (sc->sc_type == WM_T_80003)
   11476 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11477 	else
   11478 		rv = sc->phy.acquire(sc);
   11479 	if (rv != 0) {
   11480 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11481 		    __func__);
   11482 		return rv;
   11483 	}
   11484 
   11485 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11486 
   11487 	if (sc->sc_type == WM_T_80003)
   11488 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11489 	else
   11490 		sc->phy.release(sc);
   11491 
   11492 	return rv;
   11493 }
   11494 
   11495 static int
   11496 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11497 {
   11498 
   11499 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11500 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11501 
   11502 	return 0;
   11503 }
   11504 
   11505 /*
   11506  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11507  * This access method is different from IEEE MMD.
   11508  */
   11509 static int
   11510 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11511 {
   11512 	struct wm_softc *sc = device_private(dev);
   11513 	int rv;
   11514 
   11515 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11516 	if (rv != 0)
   11517 		return rv;
   11518 
   11519 	if (rd)
   11520 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11521 	else
   11522 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11523 	return rv;
   11524 }
   11525 
   11526 static int
   11527 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11528 {
   11529 
   11530 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11531 }
   11532 
   11533 static int
   11534 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11535 {
   11536 
   11537 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11538 }
   11539 
   11540 /* SGMII related */
   11541 
   11542 /*
   11543  * wm_sgmii_uses_mdio
   11544  *
   11545  * Check whether the transaction is to the internal PHY or the external
   11546  * MDIO interface. Return true if it's MDIO.
   11547  */
   11548 static bool
   11549 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11550 {
   11551 	uint32_t reg;
   11552 	bool ismdio = false;
   11553 
   11554 	switch (sc->sc_type) {
   11555 	case WM_T_82575:
   11556 	case WM_T_82576:
   11557 		reg = CSR_READ(sc, WMREG_MDIC);
   11558 		ismdio = ((reg & MDIC_DEST) != 0);
   11559 		break;
   11560 	case WM_T_82580:
   11561 	case WM_T_I350:
   11562 	case WM_T_I354:
   11563 	case WM_T_I210:
   11564 	case WM_T_I211:
   11565 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11566 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11567 		break;
   11568 	default:
   11569 		break;
   11570 	}
   11571 
   11572 	return ismdio;
   11573 }
   11574 
   11575 /*
   11576  * wm_sgmii_readreg:	[mii interface function]
   11577  *
   11578  *	Read a PHY register on the SGMII
   11579  * This could be handled by the PHY layer if we didn't have to lock the
    11580  * resource ...
   11581  */
   11582 static int
   11583 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11584 {
   11585 	struct wm_softc *sc = device_private(dev);
   11586 	int rv;
   11587 
   11588 	if (sc->phy.acquire(sc)) {
   11589 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11590 		return -1;
   11591 	}
   11592 
   11593 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11594 
   11595 	sc->phy.release(sc);
   11596 	return rv;
   11597 }
   11598 
   11599 static int
   11600 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11601 {
   11602 	struct wm_softc *sc = device_private(dev);
   11603 	uint32_t i2ccmd;
    11604 	int i, rv = 0;
   11605 
   11606 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11607 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11608 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11609 
   11610 	/* Poll the ready bit */
   11611 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11612 		delay(50);
   11613 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11614 		if (i2ccmd & I2CCMD_READY)
   11615 			break;
   11616 	}
   11617 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11618 		device_printf(dev, "I2CCMD Read did not complete\n");
   11619 		rv = ETIMEDOUT;
   11620 	}
   11621 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11622 		device_printf(dev, "I2CCMD Error bit set\n");
   11623 		rv = EIO;
   11624 	}
   11625 
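          	/* Swap the data bytes back from I2C byte order */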
   11626 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11627 
   11628 	return rv;
   11629 }
   11630 
   11631 /*
   11632  * wm_sgmii_writereg:	[mii interface function]
   11633  *
   11634  *	Write a PHY register on the SGMII.
   11635  * This could be handled by the PHY layer if we didn't have to lock the
    11636  * resource ...
   11637  */
   11638 static int
   11639 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11640 {
   11641 	struct wm_softc *sc = device_private(dev);
   11642 	int rv;
   11643 
   11644 	if (sc->phy.acquire(sc) != 0) {
   11645 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11646 		return -1;
   11647 	}
   11648 
   11649 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11650 
   11651 	sc->phy.release(sc);
   11652 
   11653 	return rv;
   11654 }
   11655 
   11656 static int
   11657 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11658 {
   11659 	struct wm_softc *sc = device_private(dev);
   11660 	uint32_t i2ccmd;
   11661 	uint16_t swapdata;
   11662 	int rv = 0;
   11663 	int i;
   11664 
   11665 	/* Swap the data bytes for the I2C interface */
   11666 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11667 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11668 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11669 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11670 
   11671 	/* Poll the ready bit */
   11672 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11673 		delay(50);
   11674 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11675 		if (i2ccmd & I2CCMD_READY)
   11676 			break;
   11677 	}
   11678 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11679 		device_printf(dev, "I2CCMD Write did not complete\n");
   11680 		rv = ETIMEDOUT;
   11681 	}
   11682 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11683 		device_printf(dev, "I2CCMD Error bit set\n");
   11684 		rv = EIO;
   11685 	}
   11686 
   11687 	return rv;
   11688 }
   11689 
   11690 /* TBI related */
   11691 
   11692 static bool
   11693 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11694 {
   11695 	bool sig;
   11696 
   11697 	sig = ctrl & CTRL_SWDPIN(1);
   11698 
   11699 	/*
   11700 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11701 	 * detect a signal, 1 if they don't.
   11702 	 */
   11703 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11704 		sig = !sig;
   11705 
   11706 	return sig;
   11707 }
   11708 
   11709 /*
   11710  * wm_tbi_mediainit:
   11711  *
   11712  *	Initialize media for use on 1000BASE-X devices.
   11713  */
   11714 static void
   11715 wm_tbi_mediainit(struct wm_softc *sc)
   11716 {
   11717 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11718 	const char *sep = "";
   11719 
   11720 	if (sc->sc_type < WM_T_82543)
   11721 		sc->sc_tipg = TIPG_WM_DFLT;
   11722 	else
   11723 		sc->sc_tipg = TIPG_LG_DFLT;
   11724 
   11725 	sc->sc_tbi_serdes_anegticks = 5;
   11726 
   11727 	/* Initialize our media structures */
   11728 	sc->sc_mii.mii_ifp = ifp;
   11729 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11730 
   11731 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11732 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11733 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11734 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11735 	else
   11736 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11737 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11738 
   11739 	/*
   11740 	 * SWD Pins:
   11741 	 *
   11742 	 *	0 = Link LED (output)
   11743 	 *	1 = Loss Of Signal (input)
   11744 	 */
   11745 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11746 
   11747 	/* XXX Perhaps this is only for TBI */
   11748 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11749 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11750 
   11751 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11752 		sc->sc_ctrl &= ~CTRL_LRST;
   11753 
   11754 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11755 
   11756 #define	ADD(ss, mm, dd)							\
   11757 do {									\
   11758 	aprint_normal("%s%s", sep, ss);					\
   11759 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11760 	sep = ", ";							\
   11761 } while (/*CONSTCOND*/0)
   11762 
   11763 	aprint_normal_dev(sc->sc_dev, "");
   11764 
   11765 	if (sc->sc_type == WM_T_I354) {
   11766 		uint32_t status;
   11767 
   11768 		status = CSR_READ(sc, WMREG_STATUS);
   11769 		if (((status & STATUS_2P5_SKU) != 0)
   11770 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11771 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11772 		} else
   11773 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11774 	} else if (sc->sc_type == WM_T_82545) {
   11775 		/* Only 82545 is LX (XXX except SFP) */
   11776 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11777 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11778 	} else {
   11779 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11780 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11781 	}
   11782 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11783 	aprint_normal("\n");
   11784 
   11785 #undef ADD
   11786 
   11787 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11788 }
   11789 
   11790 /*
   11791  * wm_tbi_mediachange:	[ifmedia interface function]
   11792  *
   11793  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11794  */
   11795 static int
   11796 wm_tbi_mediachange(struct ifnet *ifp)
   11797 {
   11798 	struct wm_softc *sc = ifp->if_softc;
   11799 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11800 	uint32_t status, ctrl;
   11801 	bool signal;
   11802 	int i;
   11803 
   11804 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11805 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11806 		/* XXX need some work for >= 82571 and < 82575 */
   11807 		if (sc->sc_type < WM_T_82575)
   11808 			return 0;
   11809 	}
   11810 
   11811 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11812 	    || (sc->sc_type >= WM_T_82575))
   11813 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11814 
   11815 	sc->sc_ctrl &= ~CTRL_LRST;
   11816 	sc->sc_txcw = TXCW_ANE;
   11817 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11818 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11819 	else if (ife->ifm_media & IFM_FDX)
   11820 		sc->sc_txcw |= TXCW_FD;
   11821 	else
   11822 		sc->sc_txcw |= TXCW_HD;
   11823 
   11824 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11825 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11826 
   11827 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11828 		device_xname(sc->sc_dev), sc->sc_txcw));
   11829 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11830 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11831 	CSR_WRITE_FLUSH(sc);
   11832 	delay(1000);
   11833 
    11834 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11835 	signal = wm_tbi_havesignal(sc, ctrl);
   11836 
   11837 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11838 		signal));
   11839 
   11840 	if (signal) {
   11841 		/* Have signal; wait for the link to come up. */
   11842 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11843 			delay(10000);
   11844 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11845 				break;
   11846 		}
   11847 
   11848 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11849 			device_xname(sc->sc_dev),i));
   11850 
   11851 		status = CSR_READ(sc, WMREG_STATUS);
   11852 		DPRINTF(WM_DEBUG_LINK,
   11853 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11854 			device_xname(sc->sc_dev),status, STATUS_LU));
   11855 		if (status & STATUS_LU) {
   11856 			/* Link is up. */
   11857 			DPRINTF(WM_DEBUG_LINK,
   11858 			    ("%s: LINK: set media -> link up %s\n",
   11859 				device_xname(sc->sc_dev),
   11860 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11861 
   11862 			/*
    11863 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    11864 			 * automatically, so re-read it into sc->sc_ctrl.
   11865 			 */
   11866 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11867 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11868 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11869 			if (status & STATUS_FD)
   11870 				sc->sc_tctl |=
   11871 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11872 			else
   11873 				sc->sc_tctl |=
   11874 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11875 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11876 				sc->sc_fcrtl |= FCRTL_XONE;
   11877 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11878 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11879 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11880 			sc->sc_tbi_linkup = 1;
   11881 		} else {
   11882 			if (i == WM_LINKUP_TIMEOUT)
   11883 				wm_check_for_link(sc);
   11884 			/* Link is down. */
   11885 			DPRINTF(WM_DEBUG_LINK,
   11886 			    ("%s: LINK: set media -> link down\n",
   11887 				device_xname(sc->sc_dev)));
   11888 			sc->sc_tbi_linkup = 0;
   11889 		}
   11890 	} else {
   11891 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11892 			device_xname(sc->sc_dev)));
   11893 		sc->sc_tbi_linkup = 0;
   11894 	}
   11895 
   11896 	wm_tbi_serdes_set_linkled(sc);
   11897 
   11898 	return 0;
   11899 }
   11900 
   11901 /*
   11902  * wm_tbi_mediastatus:	[ifmedia interface function]
   11903  *
   11904  *	Get the current interface media status on a 1000BASE-X device.
   11905  */
   11906 static void
   11907 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11908 {
   11909 	struct wm_softc *sc = ifp->if_softc;
   11910 	uint32_t ctrl, status;
   11911 
   11912 	ifmr->ifm_status = IFM_AVALID;
   11913 	ifmr->ifm_active = IFM_ETHER;
   11914 
   11915 	status = CSR_READ(sc, WMREG_STATUS);
   11916 	if ((status & STATUS_LU) == 0) {
   11917 		ifmr->ifm_active |= IFM_NONE;
   11918 		return;
   11919 	}
   11920 
   11921 	ifmr->ifm_status |= IFM_ACTIVE;
   11922 	/* Only 82545 is LX */
   11923 	if (sc->sc_type == WM_T_82545)
   11924 		ifmr->ifm_active |= IFM_1000_LX;
   11925 	else
   11926 		ifmr->ifm_active |= IFM_1000_SX;
   11927 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11928 		ifmr->ifm_active |= IFM_FDX;
   11929 	else
   11930 		ifmr->ifm_active |= IFM_HDX;
   11931 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11932 	if (ctrl & CTRL_RFCE)
   11933 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11934 	if (ctrl & CTRL_TFCE)
   11935 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11936 }
   11937 
   11938 /* XXX TBI only */
   11939 static int
   11940 wm_check_for_link(struct wm_softc *sc)
   11941 {
   11942 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11943 	uint32_t rxcw;
   11944 	uint32_t ctrl;
   11945 	uint32_t status;
   11946 	bool signal;
   11947 
   11948 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11949 		device_xname(sc->sc_dev), __func__));
   11950 
   11951 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11952 		/* XXX need some work for >= 82571 */
   11953 		if (sc->sc_type >= WM_T_82571) {
   11954 			sc->sc_tbi_linkup = 1;
   11955 			return 0;
   11956 		}
   11957 	}
   11958 
   11959 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11960 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11961 	status = CSR_READ(sc, WMREG_STATUS);
   11962 	signal = wm_tbi_havesignal(sc, ctrl);
   11963 
   11964 	DPRINTF(WM_DEBUG_LINK,
   11965 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11966 		device_xname(sc->sc_dev), __func__, signal,
   11967 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11968 
   11969 	/*
   11970 	 * SWDPIN   LU RXCW
   11971 	 *	0    0	  0
   11972 	 *	0    0	  1	(should not happen)
   11973 	 *	0    1	  0	(should not happen)
   11974 	 *	0    1	  1	(should not happen)
   11975 	 *	1    0	  0	Disable autonego and force linkup
   11976 	 *	1    0	  1	got /C/ but not linkup yet
   11977 	 *	1    1	  0	(linkup)
   11978 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11979 	 *
   11980 	 */
   11981 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11982 		DPRINTF(WM_DEBUG_LINK,
   11983 		    ("%s: %s: force linkup and fullduplex\n",
   11984 			device_xname(sc->sc_dev), __func__));
   11985 		sc->sc_tbi_linkup = 0;
   11986 		/* Disable auto-negotiation in the TXCW register */
   11987 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11988 
   11989 		/*
   11990 		 * Force link-up and also force full-duplex.
   11991 		 *
    11992 		 * NOTE: the hardware has already updated TFCE and RFCE in
    11993 		 * CTRL, so update sc->sc_ctrl from the value read above.
   11994 		 */
   11995 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11996 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11997 	} else if (((status & STATUS_LU) != 0)
   11998 	    && ((rxcw & RXCW_C) != 0)
   11999 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12000 		sc->sc_tbi_linkup = 1;
   12001 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12002 			device_xname(sc->sc_dev),
   12003 			__func__));
   12004 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12005 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12006 	} else if (signal && ((rxcw & RXCW_C) != 0))
   12007 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12008 			device_xname(sc->sc_dev), __func__));
   12009 	else
   12010 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12011 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12012 			status));
   12013 
   12014 	return 0;
   12015 }
   12016 
   12017 /*
   12018  * wm_tbi_tick:
   12019  *
   12020  *	Check the link on TBI devices.
   12021  *	This function acts as mii_tick().
   12022  */
   12023 static void
   12024 wm_tbi_tick(struct wm_softc *sc)
   12025 {
   12026 	struct mii_data *mii = &sc->sc_mii;
   12027 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12028 	uint32_t status;
   12029 
   12030 	KASSERT(WM_CORE_LOCKED(sc));
   12031 
   12032 	status = CSR_READ(sc, WMREG_STATUS);
   12033 
   12034 	/* XXX is this needed? */
   12035 	(void)CSR_READ(sc, WMREG_RXCW);
   12036 	(void)CSR_READ(sc, WMREG_CTRL);
   12037 
   12038 	/* set link status */
   12039 	if ((status & STATUS_LU) == 0) {
   12040 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12041 			device_xname(sc->sc_dev)));
   12042 		sc->sc_tbi_linkup = 0;
   12043 	} else if (sc->sc_tbi_linkup == 0) {
   12044 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12045 			device_xname(sc->sc_dev),
   12046 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12047 		sc->sc_tbi_linkup = 1;
   12048 		sc->sc_tbi_serdes_ticks = 0;
   12049 	}
   12050 
   12051 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12052 		goto setled;
   12053 
   12054 	if ((status & STATUS_LU) == 0) {
   12055 		sc->sc_tbi_linkup = 0;
   12056 		/* If the timer expired, retry autonegotiation */
   12057 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12058 		    && (++sc->sc_tbi_serdes_ticks
   12059 			>= sc->sc_tbi_serdes_anegticks)) {
   12060 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12061 			sc->sc_tbi_serdes_ticks = 0;
   12062 			/*
   12063 			 * Reset the link, and let autonegotiation do
   12064 			 * its thing
   12065 			 */
   12066 			sc->sc_ctrl |= CTRL_LRST;
   12067 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12068 			CSR_WRITE_FLUSH(sc);
   12069 			delay(1000);
   12070 			sc->sc_ctrl &= ~CTRL_LRST;
   12071 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12072 			CSR_WRITE_FLUSH(sc);
   12073 			delay(1000);
   12074 			CSR_WRITE(sc, WMREG_TXCW,
   12075 			    sc->sc_txcw & ~TXCW_ANE);
   12076 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12077 		}
   12078 	}
   12079 
   12080 setled:
   12081 	wm_tbi_serdes_set_linkled(sc);
   12082 }
   12083 
   12084 /* SERDES related */
   12085 static void
   12086 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12087 {
   12088 	uint32_t reg;
   12089 
   12090 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12091 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12092 		return;
   12093 
   12094 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12095 	reg |= PCS_CFG_PCS_EN;
   12096 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12097 
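          	/* Clearing SDP3 powers up the link (e.g. an SFP laser) */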
   12098 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12099 	reg &= ~CTRL_EXT_SWDPIN(3);
   12100 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12101 	CSR_WRITE_FLUSH(sc);
   12102 }
   12103 
   12104 static int
   12105 wm_serdes_mediachange(struct ifnet *ifp)
   12106 {
   12107 	struct wm_softc *sc = ifp->if_softc;
   12108 	bool pcs_autoneg = true; /* XXX */
   12109 	uint32_t ctrl_ext, pcs_lctl, reg;
   12110 
   12111 	/* XXX Currently, this function is not called on 8257[12] */
   12112 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12113 	    || (sc->sc_type >= WM_T_82575))
   12114 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12115 
   12116 	wm_serdes_power_up_link_82575(sc);
   12117 
   12118 	sc->sc_ctrl |= CTRL_SLU;
   12119 
   12120 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12121 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12122 
   12123 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12124 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12125 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12126 	case CTRL_EXT_LINK_MODE_SGMII:
   12127 		pcs_autoneg = true;
   12128 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12129 		break;
   12130 	case CTRL_EXT_LINK_MODE_1000KX:
   12131 		pcs_autoneg = false;
   12132 		/* FALLTHROUGH */
   12133 	default:
   12134 		if ((sc->sc_type == WM_T_82575)
   12135 		    || (sc->sc_type == WM_T_82576)) {
   12136 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12137 				pcs_autoneg = false;
   12138 		}
   12139 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12140 		    | CTRL_FRCFDX;
   12141 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12142 	}
   12143 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12144 
   12145 	if (pcs_autoneg) {
   12146 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12147 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12148 
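          		/* Advertise both symmetric and asymmetric pause */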
   12149 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12150 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12151 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12152 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12153 	} else
   12154 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12155 
   12156 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    12157 
   12159 	return 0;
   12160 }
   12161 
   12162 static void
   12163 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12164 {
   12165 	struct wm_softc *sc = ifp->if_softc;
   12166 	struct mii_data *mii = &sc->sc_mii;
   12167 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12168 	uint32_t pcs_adv, pcs_lpab, reg;
   12169 
   12170 	ifmr->ifm_status = IFM_AVALID;
   12171 	ifmr->ifm_active = IFM_ETHER;
   12172 
   12173 	/* Check PCS */
   12174 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12175 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12176 		ifmr->ifm_active |= IFM_NONE;
   12177 		sc->sc_tbi_linkup = 0;
   12178 		goto setled;
   12179 	}
   12180 
   12181 	sc->sc_tbi_linkup = 1;
   12182 	ifmr->ifm_status |= IFM_ACTIVE;
   12183 	if (sc->sc_type == WM_T_I354) {
   12184 		uint32_t status;
   12185 
   12186 		status = CSR_READ(sc, WMREG_STATUS);
   12187 		if (((status & STATUS_2P5_SKU) != 0)
   12188 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12189 			ifmr->ifm_active |= IFM_2500_KX;
   12190 		} else
   12191 			ifmr->ifm_active |= IFM_1000_KX;
   12192 	} else {
   12193 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12194 		case PCS_LSTS_SPEED_10:
   12195 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12196 			break;
   12197 		case PCS_LSTS_SPEED_100:
   12198 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12199 			break;
   12200 		case PCS_LSTS_SPEED_1000:
   12201 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12202 			break;
   12203 		default:
   12204 			device_printf(sc->sc_dev, "Unknown speed\n");
   12205 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12206 			break;
   12207 		}
   12208 	}
   12209 	if ((reg & PCS_LSTS_FDX) != 0)
   12210 		ifmr->ifm_active |= IFM_FDX;
   12211 	else
   12212 		ifmr->ifm_active |= IFM_HDX;
   12213 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12214 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12215 		/* Check flow */
   12216 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12217 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12218 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12219 			goto setled;
   12220 		}
   12221 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12222 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12223 		DPRINTF(WM_DEBUG_LINK,
   12224 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
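          		/* Resolve pause as in IEEE 802.3 Annex 28B */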
   12225 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12226 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12227 			mii->mii_media_active |= IFM_FLOW
   12228 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12229 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12230 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12231 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12232 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12233 			mii->mii_media_active |= IFM_FLOW
   12234 			    | IFM_ETH_TXPAUSE;
   12235 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12236 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12237 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12238 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12239 			mii->mii_media_active |= IFM_FLOW
   12240 			    | IFM_ETH_RXPAUSE;
   12241 		}
   12242 	}
   12243 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12244 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12245 setled:
   12246 	wm_tbi_serdes_set_linkled(sc);
   12247 }
   12248 
   12249 /*
   12250  * wm_serdes_tick:
   12251  *
   12252  *	Check the link on serdes devices.
   12253  */
   12254 static void
   12255 wm_serdes_tick(struct wm_softc *sc)
   12256 {
   12257 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12258 	struct mii_data *mii = &sc->sc_mii;
   12259 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12260 	uint32_t reg;
   12261 
   12262 	KASSERT(WM_CORE_LOCKED(sc));
   12263 
   12264 	mii->mii_media_status = IFM_AVALID;
   12265 	mii->mii_media_active = IFM_ETHER;
   12266 
   12267 	/* Check PCS */
   12268 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12269 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12270 		mii->mii_media_status |= IFM_ACTIVE;
   12271 		sc->sc_tbi_linkup = 1;
   12272 		sc->sc_tbi_serdes_ticks = 0;
   12273 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12274 		if ((reg & PCS_LSTS_FDX) != 0)
   12275 			mii->mii_media_active |= IFM_FDX;
   12276 		else
   12277 			mii->mii_media_active |= IFM_HDX;
   12278 	} else {
   12279 		mii->mii_media_status |= IFM_NONE;
   12280 		sc->sc_tbi_linkup = 0;
   12281 		/* If the timer expired, retry autonegotiation */
   12282 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12283 		    && (++sc->sc_tbi_serdes_ticks
   12284 			>= sc->sc_tbi_serdes_anegticks)) {
   12285 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12286 			sc->sc_tbi_serdes_ticks = 0;
   12287 			/* XXX */
   12288 			wm_serdes_mediachange(ifp);
   12289 		}
   12290 	}
   12291 
   12292 	wm_tbi_serdes_set_linkled(sc);
   12293 }
   12294 
   12295 /* SFP related */
   12296 
   12297 static int
   12298 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12299 {
   12300 	uint32_t i2ccmd;
   12301 	int i;
   12302 
   12303 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12304 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12305 
   12306 	/* Poll the ready bit */
   12307 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12308 		delay(50);
   12309 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12310 		if (i2ccmd & I2CCMD_READY)
   12311 			break;
   12312 	}
   12313 	if ((i2ccmd & I2CCMD_READY) == 0)
   12314 		return -1;
   12315 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12316 		return -1;
   12317 
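          	/* The data is returned in the low byte of I2CCMD */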
   12318 	*data = i2ccmd & 0x00ff;
   12319 
   12320 	return 0;
   12321 }
   12322 
   12323 static uint32_t
   12324 wm_sfp_get_media_type(struct wm_softc *sc)
   12325 {
   12326 	uint32_t ctrl_ext;
   12327 	uint8_t val = 0;
   12328 	int timeout = 3;
   12329 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12330 	int rv = -1;
   12331 
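          	/* Enable the I2C interface to reach the SFP module */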
   12332 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12333 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12334 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12335 	CSR_WRITE_FLUSH(sc);
   12336 
   12337 	/* Read SFP module data */
   12338 	while (timeout) {
   12339 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12340 		if (rv == 0)
   12341 			break;
   12342 		delay(100*1000); /* XXX too big */
   12343 		timeout--;
   12344 	}
   12345 	if (rv != 0)
   12346 		goto out;
   12347 	switch (val) {
   12348 	case SFF_SFP_ID_SFF:
   12349 		aprint_normal_dev(sc->sc_dev,
   12350 		    "Module/Connector soldered to board\n");
   12351 		break;
   12352 	case SFF_SFP_ID_SFP:
   12353 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12354 		break;
   12355 	case SFF_SFP_ID_UNKNOWN:
   12356 		goto out;
   12357 	default:
   12358 		break;
   12359 	}
   12360 
   12361 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
    12362 	if (rv != 0)
    12363 		goto out;
   12365 
   12366 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12367 		mediatype = WM_MEDIATYPE_SERDES;
   12368 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12369 		sc->sc_flags |= WM_F_SGMII;
   12370 		mediatype = WM_MEDIATYPE_COPPER;
   12371 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12372 		sc->sc_flags |= WM_F_SGMII;
   12373 		mediatype = WM_MEDIATYPE_SERDES;
   12374 	}
   12375 
   12376 out:
   12377 	/* Restore I2C interface setting */
   12378 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12379 
   12380 	return mediatype;
   12381 }
   12382 
   12383 /*
   12384  * NVM related.
   12385  * Microwire, SPI (w/wo EERD) and Flash.
   12386  */
   12387 
   12388 /* Both spi and uwire */
   12389 
   12390 /*
   12391  * wm_eeprom_sendbits:
   12392  *
   12393  *	Send a series of bits to the EEPROM.
   12394  */
   12395 static void
   12396 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12397 {
   12398 	uint32_t reg;
   12399 	int x;
   12400 
   12401 	reg = CSR_READ(sc, WMREG_EECD);
   12402 
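          	/* Clock out each bit, MSB first: set DI, then pulse SK high and low */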
   12403 	for (x = nbits; x > 0; x--) {
   12404 		if (bits & (1U << (x - 1)))
   12405 			reg |= EECD_DI;
   12406 		else
   12407 			reg &= ~EECD_DI;
   12408 		CSR_WRITE(sc, WMREG_EECD, reg);
   12409 		CSR_WRITE_FLUSH(sc);
   12410 		delay(2);
   12411 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12412 		CSR_WRITE_FLUSH(sc);
   12413 		delay(2);
   12414 		CSR_WRITE(sc, WMREG_EECD, reg);
   12415 		CSR_WRITE_FLUSH(sc);
   12416 		delay(2);
   12417 	}
   12418 }
   12419 
   12420 /*
   12421  * wm_eeprom_recvbits:
   12422  *
   12423  *	Receive a series of bits from the EEPROM.
   12424  */
   12425 static void
   12426 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12427 {
   12428 	uint32_t reg, val;
   12429 	int x;
   12430 
   12431 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12432 
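          	/* Clock in each bit, MSB first: raise SK, sample DO, then lower SK */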
   12433 	val = 0;
   12434 	for (x = nbits; x > 0; x--) {
   12435 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12436 		CSR_WRITE_FLUSH(sc);
   12437 		delay(2);
   12438 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12439 			val |= (1U << (x - 1));
   12440 		CSR_WRITE(sc, WMREG_EECD, reg);
   12441 		CSR_WRITE_FLUSH(sc);
   12442 		delay(2);
   12443 	}
   12444 	*valp = val;
   12445 }
   12446 
   12447 /* Microwire */
   12448 
   12449 /*
   12450  * wm_nvm_read_uwire:
   12451  *
   12452  *	Read a word from the EEPROM using the MicroWire protocol.
   12453  */
   12454 static int
   12455 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12456 {
   12457 	uint32_t reg, val;
   12458 	int i;
   12459 
   12460 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12461 		device_xname(sc->sc_dev), __func__));
   12462 
   12463 	if (sc->nvm.acquire(sc) != 0)
   12464 		return -1;
   12465 
   12466 	for (i = 0; i < wordcnt; i++) {
   12467 		/* Clear SK and DI. */
   12468 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12469 		CSR_WRITE(sc, WMREG_EECD, reg);
   12470 
    12471 		/*
    12472 		 * XXX: workaround for a bug in qemu 0.12.x and prior,
    12473 		 * and in Xen.
    12474 		 *
    12475 		 * We use this workaround only for the 82540 because
    12476 		 * qemu's e1000 acts as an 82540.
    12477 		 */
   12478 		if (sc->sc_type == WM_T_82540) {
   12479 			reg |= EECD_SK;
   12480 			CSR_WRITE(sc, WMREG_EECD, reg);
   12481 			reg &= ~EECD_SK;
   12482 			CSR_WRITE(sc, WMREG_EECD, reg);
   12483 			CSR_WRITE_FLUSH(sc);
   12484 			delay(2);
   12485 		}
   12486 		/* XXX: end of workaround */
   12487 
   12488 		/* Set CHIP SELECT. */
   12489 		reg |= EECD_CS;
   12490 		CSR_WRITE(sc, WMREG_EECD, reg);
   12491 		CSR_WRITE_FLUSH(sc);
   12492 		delay(2);
   12493 
   12494 		/* Shift in the READ command. */
   12495 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12496 
   12497 		/* Shift in address. */
   12498 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12499 
   12500 		/* Shift out the data. */
   12501 		wm_eeprom_recvbits(sc, &val, 16);
   12502 		data[i] = val & 0xffff;
   12503 
   12504 		/* Clear CHIP SELECT. */
   12505 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12506 		CSR_WRITE(sc, WMREG_EECD, reg);
   12507 		CSR_WRITE_FLUSH(sc);
   12508 		delay(2);
   12509 	}
   12510 
   12511 	sc->nvm.release(sc);
   12512 	return 0;
   12513 }
   12514 
   12515 /* SPI */
   12516 
   12517 /*
   12518  * Set SPI and FLASH related information from the EECD register.
   12519  * For 82541 and 82547, the word size is taken from EEPROM.
   12520  */
   12521 static int
   12522 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12523 {
   12524 	int size;
   12525 	uint32_t reg;
   12526 	uint16_t data;
   12527 
   12528 	reg = CSR_READ(sc, WMREG_EECD);
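          	/* EECD_EE_ABITS set means the part uses 16-bit addressing */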
   12529 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12530 
   12531 	/* Read the size of NVM from EECD by default */
   12532 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12533 	switch (sc->sc_type) {
   12534 	case WM_T_82541:
   12535 	case WM_T_82541_2:
   12536 	case WM_T_82547:
   12537 	case WM_T_82547_2:
   12538 		/* Set dummy value to access EEPROM */
   12539 		sc->sc_nvm_wordsize = 64;
   12540 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12541 			aprint_error_dev(sc->sc_dev,
   12542 			    "%s: failed to read EEPROM size\n", __func__);
   12543 		}
   12544 		reg = data;
   12545 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12546 		if (size == 0)
    12547 			size = 6; /* 64 words */
   12548 		else
   12549 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12550 		break;
   12551 	case WM_T_80003:
   12552 	case WM_T_82571:
   12553 	case WM_T_82572:
   12554 	case WM_T_82573: /* SPI case */
   12555 	case WM_T_82574: /* SPI case */
   12556 	case WM_T_82583: /* SPI case */
   12557 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12558 		if (size > 14)
   12559 			size = 14;
   12560 		break;
   12561 	case WM_T_82575:
   12562 	case WM_T_82576:
   12563 	case WM_T_82580:
   12564 	case WM_T_I350:
   12565 	case WM_T_I354:
   12566 	case WM_T_I210:
   12567 	case WM_T_I211:
   12568 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12569 		if (size > 15)
   12570 			size = 15;
   12571 		break;
   12572 	default:
   12573 		aprint_error_dev(sc->sc_dev,
   12574 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    12575 		return -1;
   12577 	}
   12578 
   12579 	sc->sc_nvm_wordsize = 1 << size;
   12580 
   12581 	return 0;
   12582 }
   12583 
   12584 /*
   12585  * wm_nvm_ready_spi:
   12586  *
   12587  *	Wait for a SPI EEPROM to be ready for commands.
   12588  */
   12589 static int
   12590 wm_nvm_ready_spi(struct wm_softc *sc)
   12591 {
   12592 	uint32_t val;
   12593 	int usec;
   12594 
   12595 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12596 		device_xname(sc->sc_dev), __func__));
   12597 
   12598 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12599 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12600 		wm_eeprom_recvbits(sc, &val, 8);
   12601 		if ((val & SPI_SR_RDY) == 0)
   12602 			break;
   12603 	}
   12604 	if (usec >= SPI_MAX_RETRIES) {
   12605 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12606 		return -1;
   12607 	}
   12608 	return 0;
   12609 }
   12610 
   12611 /*
   12612  * wm_nvm_read_spi:
   12613  *
    12614  *	Read a word from the EEPROM using the SPI protocol.
   12615  */
   12616 static int
   12617 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12618 {
   12619 	uint32_t reg, val;
   12620 	int i;
   12621 	uint8_t opc;
   12622 	int rv = 0;
   12623 
   12624 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12625 		device_xname(sc->sc_dev), __func__));
   12626 
   12627 	if (sc->nvm.acquire(sc) != 0)
   12628 		return -1;
   12629 
   12630 	/* Clear SK and CS. */
   12631 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12632 	CSR_WRITE(sc, WMREG_EECD, reg);
   12633 	CSR_WRITE_FLUSH(sc);
   12634 	delay(2);
   12635 
   12636 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12637 		goto out;
   12638 
   12639 	/* Toggle CS to flush commands. */
   12640 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12641 	CSR_WRITE_FLUSH(sc);
   12642 	delay(2);
   12643 	CSR_WRITE(sc, WMREG_EECD, reg);
   12644 	CSR_WRITE_FLUSH(sc);
   12645 	delay(2);
   12646 
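          	/* On 8-bit address parts, address bit 8 is carried in the opcode */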
   12647 	opc = SPI_OPC_READ;
   12648 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12649 		opc |= SPI_OPC_A8;
   12650 
   12651 	wm_eeprom_sendbits(sc, opc, 8);
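          	/* The EEPROM is byte addressed: send the word address doubled */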
   12652 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12653 
   12654 	for (i = 0; i < wordcnt; i++) {
   12655 		wm_eeprom_recvbits(sc, &val, 16);
   12656 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12657 	}
   12658 
   12659 	/* Raise CS and clear SK. */
   12660 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12661 	CSR_WRITE(sc, WMREG_EECD, reg);
   12662 	CSR_WRITE_FLUSH(sc);
   12663 	delay(2);
   12664 
   12665 out:
   12666 	sc->nvm.release(sc);
   12667 	return rv;
   12668 }
   12669 
   12670 /* Using with EERD */
   12671 
   12672 static int
   12673 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12674 {
   12675 	uint32_t attempts = 100000;
   12676 	uint32_t i, reg = 0;
   12677 	int32_t done = -1;
   12678 
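          	/* Poll up to 100000 * 5us = 500ms for the DONE bit */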
   12679 	for (i = 0; i < attempts; i++) {
   12680 		reg = CSR_READ(sc, rw);
   12681 
   12682 		if (reg & EERD_DONE) {
   12683 			done = 0;
   12684 			break;
   12685 		}
   12686 		delay(5);
   12687 	}
   12688 
   12689 	return done;
   12690 }
   12691 
   12692 static int
   12693 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12694 {
   12695 	int i, eerd = 0;
   12696 	int rv = 0;
   12697 
   12698 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12699 		device_xname(sc->sc_dev), __func__));
   12700 
   12701 	if (sc->nvm.acquire(sc) != 0)
   12702 		return -1;
   12703 
   12704 	for (i = 0; i < wordcnt; i++) {
   12705 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12706 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12707 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12708 		if (rv != 0) {
   12709 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   12710 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   12711 			break;
   12712 		}
   12713 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12714 	}
   12715 
   12716 	sc->nvm.release(sc);
   12717 	return rv;
   12718 }
   12719 
   12720 /* Flash */
   12721 
   12722 static int
   12723 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12724 {
   12725 	uint32_t eecd;
   12726 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12727 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12728 	uint32_t nvm_dword = 0;
   12729 	uint8_t sig_byte = 0;
   12730 	int rv;
   12731 
   12732 	switch (sc->sc_type) {
   12733 	case WM_T_PCH_SPT:
   12734 	case WM_T_PCH_CNP:
   12735 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12736 		act_offset = ICH_NVM_SIG_WORD * 2;
   12737 
   12738 		/* set bank to 0 in case flash read fails. */
   12739 		*bank = 0;
   12740 
   12741 		/* Check bank 0 */
   12742 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12743 		if (rv != 0)
   12744 			return rv;
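          		/* The bank signature is in the high byte of the word */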
   12745 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12746 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12747 			*bank = 0;
   12748 			return 0;
   12749 		}
   12750 
   12751 		/* Check bank 1 */
    12752 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12753 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
    12754 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12755 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12756 			*bank = 1;
   12757 			return 0;
   12758 		}
   12759 		aprint_error_dev(sc->sc_dev,
   12760 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12761 		return -1;
   12762 	case WM_T_ICH8:
   12763 	case WM_T_ICH9:
   12764 		eecd = CSR_READ(sc, WMREG_EECD);
   12765 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12766 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12767 			return 0;
   12768 		}
   12769 		/* FALLTHROUGH */
   12770 	default:
   12771 		/* Default to 0 */
   12772 		*bank = 0;
   12773 
   12774 		/* Check bank 0 */
   12775 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12776 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12777 			*bank = 0;
   12778 			return 0;
   12779 		}
   12780 
   12781 		/* Check bank 1 */
   12782 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12783 		    &sig_byte);
   12784 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12785 			*bank = 1;
   12786 			return 0;
   12787 		}
   12788 	}
   12789 
   12790 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12791 		device_xname(sc->sc_dev)));
   12792 	return -1;
   12793 }
   12794 
   12795 /******************************************************************************
   12796  * This function does initial flash setup so that a new read/write/erase cycle
   12797  * can be started.
   12798  *
   12799  * sc - The pointer to the hw structure
   12800  ****************************************************************************/
   12801 static int32_t
   12802 wm_ich8_cycle_init(struct wm_softc *sc)
   12803 {
   12804 	uint16_t hsfsts;
   12805 	int32_t error = 1;
   12806 	int32_t i     = 0;
   12807 
   12808 	if (sc->sc_type >= WM_T_PCH_SPT)
   12809 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12810 	else
   12811 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12812 
    12813 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   12814 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12815 		return error;
   12816 
    12817 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   12819 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12820 
   12821 	if (sc->sc_type >= WM_T_PCH_SPT)
   12822 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12823 	else
   12824 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12825 
    12826 	/*
    12827 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12828 	 * check against in order to start a new cycle, or the FDONE bit
    12829 	 * should be changed in the hardware so that it is 1 after hardware
    12830 	 * reset, which can then be used as an indication whether a cycle
    12831 	 * is in progress or has been completed. We should also have some
    12832 	 * software semaphore mechanism to guard FDONE or the cycle-in-
    12833 	 * progress bit so that accesses by two threads are serialized, or
    12834 	 * a way so that two threads don't start the cycle at the same time.
    12835 	 */
   12836 
   12837 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12838 		/*
   12839 		 * There is no cycle running at present, so we can start a
   12840 		 * cycle
   12841 		 */
   12842 
   12843 		/* Begin by setting Flash Cycle Done. */
   12844 		hsfsts |= HSFSTS_DONE;
   12845 		if (sc->sc_type >= WM_T_PCH_SPT)
   12846 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12847 			    hsfsts & 0xffffUL);
   12848 		else
   12849 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12850 		error = 0;
   12851 	} else {
   12852 		/*
    12853 		 * Otherwise poll for some time so the current cycle has a
   12854 		 * chance to end before giving up.
   12855 		 */
   12856 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12857 			if (sc->sc_type >= WM_T_PCH_SPT)
   12858 				hsfsts = ICH8_FLASH_READ32(sc,
   12859 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12860 			else
   12861 				hsfsts = ICH8_FLASH_READ16(sc,
   12862 				    ICH_FLASH_HSFSTS);
   12863 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12864 				error = 0;
   12865 				break;
   12866 			}
   12867 			delay(1);
   12868 		}
   12869 		if (error == 0) {
   12870 			/*
    12871 			 * The previous cycle finished in time; now set the
    12872 			 * Flash Cycle Done bit.
   12873 			 */
   12874 			hsfsts |= HSFSTS_DONE;
   12875 			if (sc->sc_type >= WM_T_PCH_SPT)
   12876 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12877 				    hsfsts & 0xffffUL);
   12878 			else
   12879 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12880 				    hsfsts);
   12881 		}
   12882 	}
   12883 	return error;
   12884 }
   12885 
   12886 /******************************************************************************
   12887  * This function starts a flash cycle and waits for its completion
   12888  *
   12889  * sc - The pointer to the hw structure
   12890  ****************************************************************************/
   12891 static int32_t
   12892 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12893 {
   12894 	uint16_t hsflctl;
   12895 	uint16_t hsfsts;
   12896 	int32_t error = 1;
   12897 	uint32_t i = 0;
   12898 
   12899 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12900 	if (sc->sc_type >= WM_T_PCH_SPT)
   12901 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12902 	else
   12903 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12904 	hsflctl |= HSFCTL_GO;
   12905 	if (sc->sc_type >= WM_T_PCH_SPT)
   12906 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12907 		    (uint32_t)hsflctl << 16);
   12908 	else
   12909 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12910 
   12911 	/* Wait till FDONE bit is set to 1 */
   12912 	do {
   12913 		if (sc->sc_type >= WM_T_PCH_SPT)
   12914 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12915 			    & 0xffffUL;
   12916 		else
   12917 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12918 		if (hsfsts & HSFSTS_DONE)
   12919 			break;
   12920 		delay(1);
   12921 		i++;
   12922 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12924 		error = 0;
   12925 
   12926 	return error;
   12927 }
   12928 
   12929 /******************************************************************************
   12930  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12931  *
   12932  * sc - The pointer to the hw structure
   12933  * index - The index of the byte or word to read.
 * size - Size of data to read: 1=byte, 2=word, 4=dword
   12935  * data - Pointer to the word to store the value read.
   12936  *****************************************************************************/
   12937 static int32_t
   12938 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12939     uint32_t size, uint32_t *data)
   12940 {
   12941 	uint16_t hsfsts;
   12942 	uint16_t hsflctl;
   12943 	uint32_t flash_linear_address;
   12944 	uint32_t flash_data = 0;
   12945 	int32_t error = 1;
   12946 	int32_t count = 0;
   12947 
	if (size < 1 || size > 4 || data == NULL ||
   12949 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12950 		return error;
   12951 
   12952 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12953 	    sc->sc_ich8_flash_base;
   12954 
   12955 	do {
   12956 		delay(1);
   12957 		/* Steps */
   12958 		error = wm_ich8_cycle_init(sc);
   12959 		if (error)
   12960 			break;
   12961 
   12962 		if (sc->sc_type >= WM_T_PCH_SPT)
   12963 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12964 			    >> 16;
   12965 		else
   12966 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field encodes size - 1 (0 means one byte). */
   12968 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12969 		    & HSFCTL_BCOUNT_MASK;
   12970 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12971 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12972 			/*
   12973 			 * In SPT, This register is in Lan memory space, not
   12974 			 * flash. Therefore, only 32 bit access is supported.
   12975 			 */
   12976 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12977 			    (uint32_t)hsflctl << 16);
   12978 		} else
   12979 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12980 
   12981 		/*
   12982 		 * Write the last 24 bits of index into Flash Linear address
   12983 		 * field in Flash Address
   12984 		 */
		/* TODO: maybe check the index against the size of the flash */
   12986 
   12987 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12988 
   12989 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12990 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read out the Flash Data0
		 * register, least significant byte first.
		 */
   12997 		if (error == 0) {
   12998 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12999 			if (size == 1)
   13000 				*data = (uint8_t)(flash_data & 0x000000FF);
   13001 			else if (size == 2)
   13002 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13003 			else if (size == 4)
   13004 				*data = (uint32_t)flash_data;
   13005 			break;
   13006 		} else {
			/*
			 * If we've gotten here, things are probably
			 * completely hosed, but if the error condition is
			 * detected it won't hurt to retry, up to
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
   13013 			if (sc->sc_type >= WM_T_PCH_SPT)
   13014 				hsfsts = ICH8_FLASH_READ32(sc,
   13015 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13016 			else
   13017 				hsfsts = ICH8_FLASH_READ16(sc,
   13018 				    ICH_FLASH_HSFSTS);
   13019 
   13020 			if (hsfsts & HSFSTS_ERR) {
   13021 				/* Repeat for some time before giving up. */
   13022 				continue;
   13023 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13024 				break;
   13025 		}
   13026 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13027 
   13028 	return error;
   13029 }
   13030 
   13031 /******************************************************************************
   13032  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13033  *
 * sc - pointer to the wm_softc structure
   13035  * index - The index of the byte to read.
   13036  * data - Pointer to a byte to store the value read.
   13037  *****************************************************************************/
   13038 static int32_t
   13039 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13040 {
   13041 	int32_t status;
   13042 	uint32_t word = 0;
   13043 
   13044 	status = wm_read_ich8_data(sc, index, 1, &word);
   13045 	if (status == 0)
   13046 		*data = (uint8_t)word;
   13047 	else
   13048 		*data = 0;
   13049 
   13050 	return status;
   13051 }
   13052 
   13053 /******************************************************************************
   13054  * Reads a word from the NVM using the ICH8 flash access registers.
   13055  *
 * sc - pointer to the wm_softc structure
   13057  * index - The starting byte index of the word to read.
   13058  * data - Pointer to a word to store the value read.
   13059  *****************************************************************************/
   13060 static int32_t
   13061 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13062 {
   13063 	int32_t status;
   13064 	uint32_t word = 0;
   13065 
   13066 	status = wm_read_ich8_data(sc, index, 2, &word);
   13067 	if (status == 0)
   13068 		*data = (uint16_t)word;
   13069 	else
   13070 		*data = 0;
   13071 
   13072 	return status;
   13073 }
   13074 
   13075 /******************************************************************************
   13076  * Reads a dword from the NVM using the ICH8 flash access registers.
   13077  *
 * sc - pointer to the wm_softc structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   13081  *****************************************************************************/
   13082 static int32_t
   13083 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13084 {
   13085 	int32_t status;
   13086 
   13087 	status = wm_read_ich8_data(sc, index, 4, data);
   13088 	return status;
   13089 }
   13090 
   13091 /******************************************************************************
   13092  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13093  * register.
   13094  *
   13095  * sc - Struct containing variables accessed by shared code
   13096  * offset - offset of word in the EEPROM to read
   13097  * data - word read from the EEPROM
   13098  * words - number of words to read
   13099  *****************************************************************************/
   13100 static int
   13101 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13102 {
   13103 	int32_t	 rv = 0;
   13104 	uint32_t flash_bank = 0;
   13105 	uint32_t act_offset = 0;
   13106 	uint32_t bank_offset = 0;
   13107 	uint16_t word = 0;
   13108 	uint16_t i = 0;
   13109 
   13110 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13111 		device_xname(sc->sc_dev), __func__));
   13112 
   13113 	if (sc->nvm.acquire(sc) != 0)
   13114 		return -1;
   13115 
   13116 	/*
   13117 	 * We need to know which is the valid flash bank.  In the event
   13118 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13119 	 * managing flash_bank. So it cannot be trusted and needs
   13120 	 * to be updated with each read.
   13121 	 */
   13122 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13123 	if (rv) {
   13124 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13125 			device_xname(sc->sc_dev)));
   13126 		flash_bank = 0;
   13127 	}
   13128 
   13129 	/*
   13130 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13131 	 * size
   13132 	 */
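	/* sc_ich8_flash_bank_size is in words; * 2 converts to bytes. */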
   13133 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13134 
   13135 	for (i = 0; i < words; i++) {
   13136 		/* The NVM part needs a byte offset, hence * 2 */
   13137 		act_offset = bank_offset + ((offset + i) * 2);
   13138 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13139 		if (rv) {
   13140 			aprint_error_dev(sc->sc_dev,
   13141 			    "%s: failed to read NVM\n", __func__);
   13142 			break;
   13143 		}
   13144 		data[i] = word;
   13145 	}
   13146 
   13147 	sc->nvm.release(sc);
   13148 	return rv;
   13149 }
   13150 
   13151 /******************************************************************************
   13152  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13153  * register.
   13154  *
   13155  * sc - Struct containing variables accessed by shared code
   13156  * offset - offset of word in the EEPROM to read
   13157  * data - word read from the EEPROM
   13158  * words - number of words to read
   13159  *****************************************************************************/
   13160 static int
   13161 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13162 {
   13163 	int32_t	 rv = 0;
   13164 	uint32_t flash_bank = 0;
   13165 	uint32_t act_offset = 0;
   13166 	uint32_t bank_offset = 0;
   13167 	uint32_t dword = 0;
   13168 	uint16_t i = 0;
   13169 
   13170 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13171 		device_xname(sc->sc_dev), __func__));
   13172 
   13173 	if (sc->nvm.acquire(sc) != 0)
   13174 		return -1;
   13175 
   13176 	/*
   13177 	 * We need to know which is the valid flash bank.  In the event
   13178 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13179 	 * managing flash_bank. So it cannot be trusted and needs
   13180 	 * to be updated with each read.
   13181 	 */
   13182 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13183 	if (rv) {
   13184 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13185 			device_xname(sc->sc_dev)));
   13186 		flash_bank = 0;
   13187 	}
   13188 
   13189 	/*
   13190 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13191 	 * size
   13192 	 */
   13193 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13194 
   13195 	for (i = 0; i < words; i++) {
   13196 		/* The NVM part needs a byte offset, hence * 2 */
   13197 		act_offset = bank_offset + ((offset + i) * 2);
   13198 		/* but we must read dword aligned, so mask ... */
   13199 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13200 		if (rv) {
   13201 			aprint_error_dev(sc->sc_dev,
   13202 			    "%s: failed to read NVM\n", __func__);
   13203 			break;
   13204 		}
   13205 		/* ... and pick out low or high word */
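		/*
		 * E.g. act_offset 0x106 reads the dword at 0x104 and, since
		 * bit 1 is set, takes the word from bits 31:16.
		 */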
   13206 		if ((act_offset & 0x2) == 0)
   13207 			data[i] = (uint16_t)(dword & 0xFFFF);
   13208 		else
   13209 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13210 	}
   13211 
   13212 	sc->nvm.release(sc);
   13213 	return rv;
   13214 }
   13215 
   13216 /* iNVM */
   13217 
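/*
 * Each 32-bit iNVM entry encodes a record type (see the
 * INVM_DWORD_TO_RECORD_TYPE() macro).  Word-autoload records carry a
 * word address and 16 bits of data; CSR-autoload and RSA-key records
 * are skipped over by their fixed sizes in the walk below.
 */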
   13218 static int
   13219 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13220 {
	int32_t	 rv = -1;	/* -1 means "not found" */
   13222 	uint32_t invm_dword;
   13223 	uint16_t i;
   13224 	uint8_t record_type, word_address;
   13225 
   13226 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13227 		device_xname(sc->sc_dev), __func__));
   13228 
   13229 	for (i = 0; i < INVM_SIZE; i++) {
   13230 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13231 		/* Get record type */
   13232 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13233 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13234 			break;
   13235 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13236 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13237 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13238 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13239 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13240 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13241 			if (word_address == address) {
   13242 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13243 				rv = 0;
   13244 				break;
   13245 			}
   13246 		}
   13247 	}
   13248 
   13249 	return rv;
   13250 }
   13251 
   13252 static int
   13253 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13254 {
   13255 	int rv = 0;
   13256 	int i;
   13257 
   13258 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13259 		device_xname(sc->sc_dev), __func__));
   13260 
   13261 	if (sc->nvm.acquire(sc) != 0)
   13262 		return -1;
   13263 
   13264 	for (i = 0; i < words; i++) {
   13265 		switch (offset + i) {
   13266 		case NVM_OFF_MACADDR:
   13267 		case NVM_OFF_MACADDR1:
   13268 		case NVM_OFF_MACADDR2:
   13269 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13270 			if (rv != 0) {
   13271 				data[i] = 0xffff;
   13272 				rv = -1;
   13273 			}
   13274 			break;
   13275 		case NVM_OFF_CFG2:
   13276 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13277 			if (rv != 0) {
   13278 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13279 				rv = 0;
   13280 			}
   13281 			break;
   13282 		case NVM_OFF_CFG4:
   13283 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13284 			if (rv != 0) {
   13285 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13286 				rv = 0;
   13287 			}
   13288 			break;
   13289 		case NVM_OFF_LED_1_CFG:
   13290 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13291 			if (rv != 0) {
   13292 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13293 				rv = 0;
   13294 			}
   13295 			break;
   13296 		case NVM_OFF_LED_0_2_CFG:
   13297 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13298 			if (rv != 0) {
   13299 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13300 				rv = 0;
   13301 			}
   13302 			break;
   13303 		case NVM_OFF_ID_LED_SETTINGS:
   13304 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13305 			if (rv != 0) {
   13306 				*data = ID_LED_RESERVED_FFFF;
   13307 				rv = 0;
   13308 			}
   13309 			break;
   13310 		default:
   13311 			DPRINTF(WM_DEBUG_NVM,
   13312 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13313 			*data = NVM_RESERVED_WORD;
   13314 			break;
   13315 		}
   13316 	}
   13317 
   13318 	sc->nvm.release(sc);
   13319 	return rv;
   13320 }
   13321 
/* Locking, NVM type detection, checksum validation, version and read */
   13323 
   13324 static int
   13325 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13326 {
   13327 	uint32_t eecd = 0;
   13328 
   13329 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13330 	    || sc->sc_type == WM_T_82583) {
   13331 		eecd = CSR_READ(sc, WMREG_EECD);
   13332 
   13333 		/* Isolate bits 15 & 16 */
   13334 		eecd = ((eecd >> 15) & 0x03);
   13335 
   13336 		/* If both bits are set, device is Flash type */
   13337 		if (eecd == 0x03)
   13338 			return 0;
   13339 	}
   13340 	return 1;
   13341 }
   13342 
   13343 static int
   13344 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13345 {
   13346 	uint32_t eec;
   13347 
   13348 	eec = CSR_READ(sc, WMREG_EEC);
   13349 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13350 		return 1;
   13351 
   13352 	return 0;
   13353 }
   13354 
   13355 /*
   13356  * wm_nvm_validate_checksum
   13357  *
   13358  * The checksum is defined as the sum of the first 64 (16 bit) words.
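 * Every word, including the checksum word itself, is included in the sum;
 * a valid image sums to NVM_CHECKSUM (0xBABA).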
   13359  */
   13360 static int
   13361 wm_nvm_validate_checksum(struct wm_softc *sc)
   13362 {
   13363 	uint16_t checksum;
   13364 	uint16_t eeprom_data;
   13365 #ifdef WM_DEBUG
   13366 	uint16_t csum_wordaddr, valid_checksum;
   13367 #endif
   13368 	int i;
   13369 
   13370 	checksum = 0;
   13371 
   13372 	/* Don't check for I211 */
   13373 	if (sc->sc_type == WM_T_I211)
   13374 		return 0;
   13375 
   13376 #ifdef WM_DEBUG
   13377 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13378 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13379 		csum_wordaddr = NVM_OFF_COMPAT;
   13380 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13381 	} else {
   13382 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13383 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13384 	}
   13385 
   13386 	/* Dump EEPROM image for debug */
   13387 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13388 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13389 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13390 		/* XXX PCH_SPT? */
   13391 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13392 		if ((eeprom_data & valid_checksum) == 0)
   13393 			DPRINTF(WM_DEBUG_NVM,
   13394 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13395 				device_xname(sc->sc_dev), eeprom_data,
   13396 				    valid_checksum));
   13397 	}
   13398 
   13399 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13400 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13401 		for (i = 0; i < NVM_SIZE; i++) {
   13402 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13403 				printf("XXXX ");
   13404 			else
   13405 				printf("%04hx ", eeprom_data);
   13406 			if (i % 8 == 7)
   13407 				printf("\n");
   13408 		}
   13409 	}
   13410 
   13411 #endif /* WM_DEBUG */
   13412 
   13413 	for (i = 0; i < NVM_SIZE; i++) {
   13414 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13415 			return 1;
   13416 		checksum += eeprom_data;
   13417 	}
   13418 
   13419 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13420 #ifdef WM_DEBUG
   13421 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13422 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13423 #endif
   13424 	}
   13425 
   13426 	return 0;
   13427 }
   13428 
   13429 static void
   13430 wm_nvm_version_invm(struct wm_softc *sc)
   13431 {
   13432 	uint32_t dword;
   13433 
   13434 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document
	 * describes.  Perhaps it's not perfect though...
   13438 	 *
   13439 	 * Example:
   13440 	 *
   13441 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13442 	 */
   13443 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13444 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13445 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13446 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13447 }
   13448 
   13449 static void
   13450 wm_nvm_version(struct wm_softc *sc)
   13451 {
   13452 	uint16_t major, minor, build, patch;
   13453 	uint16_t uid0, uid1;
   13454 	uint16_t nvm_data;
   13455 	uint16_t off;
   13456 	bool check_version = false;
   13457 	bool check_optionrom = false;
   13458 	bool have_build = false;
   13459 	bool have_uid = true;
   13460 
   13461 	/*
   13462 	 * Version format:
   13463 	 *
   13464 	 * XYYZ
   13465 	 * X0YZ
   13466 	 * X0YY
   13467 	 *
   13468 	 * Example:
   13469 	 *
   13470 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13471 	 *	82571	0x50a6	5.10.6?
   13472 	 *	82572	0x506a	5.6.10?
   13473 	 *	82572EI	0x5069	5.6.9?
   13474 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13475 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   13477 	 */
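
	/*
	 * Worked example of the XYYZ form: 0x50a2 decodes as major = the
	 * top nibble (0x5), minor = the middle byte (0x0a, converted to
	 * decimal 10 below) and build = the low nibble (0x2), i.e. 5.10.2.
	 */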
   13478 
   13479 	/*
   13480 	 * XXX
   13481 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13483 	 */
   13484 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13485 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13486 		have_uid = false;
   13487 
   13488 	switch (sc->sc_type) {
   13489 	case WM_T_82571:
   13490 	case WM_T_82572:
   13491 	case WM_T_82574:
   13492 	case WM_T_82583:
   13493 		check_version = true;
   13494 		check_optionrom = true;
   13495 		have_build = true;
   13496 		break;
   13497 	case WM_T_82575:
   13498 	case WM_T_82576:
   13499 	case WM_T_82580:
   13500 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13501 			check_version = true;
   13502 		break;
   13503 	case WM_T_I211:
   13504 		wm_nvm_version_invm(sc);
   13505 		have_uid = false;
   13506 		goto printver;
   13507 	case WM_T_I210:
   13508 		if (!wm_nvm_flash_presence_i210(sc)) {
   13509 			wm_nvm_version_invm(sc);
   13510 			have_uid = false;
   13511 			goto printver;
   13512 		}
   13513 		/* FALLTHROUGH */
   13514 	case WM_T_I350:
   13515 	case WM_T_I354:
   13516 		check_version = true;
   13517 		check_optionrom = true;
   13518 		break;
   13519 	default:
   13520 		return;
   13521 	}
   13522 	if (check_version
   13523 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13524 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13525 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13526 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13527 			build = nvm_data & NVM_BUILD_MASK;
   13528 			have_build = true;
   13529 		} else
   13530 			minor = nvm_data & 0x00ff;
   13531 
		/* Convert BCD-coded minor to decimal (e.g. 0x10 -> 10) */
   13533 		minor = (minor / 16) * 10 + (minor % 16);
   13534 		sc->sc_nvm_ver_major = major;
   13535 		sc->sc_nvm_ver_minor = minor;
   13536 
   13537 printver:
   13538 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13539 		    sc->sc_nvm_ver_minor);
   13540 		if (have_build) {
   13541 			sc->sc_nvm_ver_build = build;
   13542 			aprint_verbose(".%d", build);
   13543 		}
   13544 	}
   13545 
	/* Assume the Option ROM area is above NVM_SIZE */
   13547 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13548 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13549 		/* Option ROM Version */
   13550 		if ((off != 0x0000) && (off != 0xffff)) {
   13551 			int rv;
   13552 
   13553 			off += NVM_COMBO_VER_OFF;
   13554 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13555 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13556 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13557 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13558 				/* 16bits */
   13559 				major = uid0 >> 8;
   13560 				build = (uid0 << 8) | (uid1 >> 8);
   13561 				patch = uid1 & 0x00ff;
   13562 				aprint_verbose(", option ROM Version %d.%d.%d",
   13563 				    major, build, patch);
   13564 			}
   13565 		}
   13566 	}
   13567 
   13568 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13569 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13570 }
   13571 
   13572 /*
   13573  * wm_nvm_read:
   13574  *
   13575  *	Read data from the serial EEPROM.
   13576  */
   13577 static int
   13578 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13579 {
   13580 	int rv;
   13581 
   13582 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13583 		device_xname(sc->sc_dev), __func__));
   13584 
   13585 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13586 		return -1;
   13587 
   13588 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13589 
   13590 	return rv;
   13591 }
   13592 
   13593 /*
   13594  * Hardware semaphores.
 * Very complex...
   13596  */
   13597 
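/*
 * Several mechanisms are layered below:
 *  - EECD EE_REQ/EE_GNT for direct EEPROM access,
 *  - the SWSM SMBI/SWESMBI hardware semaphore,
 *  - the SW_FW_SYNC software/firmware bits taken under SWSM,
 *  - EXTCNFCTR MDIO ownership on 82573 and ICH/PCH parts,
 *  - and plain software mutexes for the ICH8 PHY and NVM paths.
 */
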
   13598 static int
   13599 wm_get_null(struct wm_softc *sc)
   13600 {
   13601 
   13602 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13603 		device_xname(sc->sc_dev), __func__));
   13604 	return 0;
   13605 }
   13606 
   13607 static void
   13608 wm_put_null(struct wm_softc *sc)
   13609 {
   13610 
   13611 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13612 		device_xname(sc->sc_dev), __func__));
   13613 	return;
   13614 }
   13615 
   13616 static int
   13617 wm_get_eecd(struct wm_softc *sc)
   13618 {
   13619 	uint32_t reg;
   13620 	int x;
   13621 
   13622 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13623 		device_xname(sc->sc_dev), __func__));
   13624 
   13625 	reg = CSR_READ(sc, WMREG_EECD);
   13626 
   13627 	/* Request EEPROM access. */
   13628 	reg |= EECD_EE_REQ;
   13629 	CSR_WRITE(sc, WMREG_EECD, reg);
   13630 
	/* ...and wait for it to be granted. */
   13632 	for (x = 0; x < 1000; x++) {
   13633 		reg = CSR_READ(sc, WMREG_EECD);
   13634 		if (reg & EECD_EE_GNT)
   13635 			break;
   13636 		delay(5);
   13637 	}
   13638 	if ((reg & EECD_EE_GNT) == 0) {
   13639 		aprint_error_dev(sc->sc_dev,
   13640 		    "could not acquire EEPROM GNT\n");
   13641 		reg &= ~EECD_EE_REQ;
   13642 		CSR_WRITE(sc, WMREG_EECD, reg);
   13643 		return -1;
   13644 	}
   13645 
   13646 	return 0;
   13647 }
   13648 
   13649 static void
   13650 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13651 {
   13652 
   13653 	*eecd |= EECD_SK;
   13654 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13655 	CSR_WRITE_FLUSH(sc);
   13656 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13657 		delay(1);
   13658 	else
   13659 		delay(50);
   13660 }
   13661 
   13662 static void
   13663 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13664 {
   13665 
   13666 	*eecd &= ~EECD_SK;
   13667 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13668 	CSR_WRITE_FLUSH(sc);
   13669 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13670 		delay(1);
   13671 	else
   13672 		delay(50);
   13673 }
   13674 
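/*
 * The two clock helpers above are the primitives for bit-banging the
 * Microwire/SPI EEPROM via EECD.  A sketch of how one data bit could be
 * clocked out with them (hypothetical helper, not used by this driver):
 */
#if 0
static void
wm_nvm_eec_shiftout_bit(struct wm_softc *sc, uint32_t *eecd, int bit)
{

	if (bit)
		*eecd |= EECD_DI;
	else
		*eecd &= ~EECD_DI;
	CSR_WRITE(sc, WMREG_EECD, *eecd);
	CSR_WRITE_FLUSH(sc);
	wm_nvm_eec_clock_raise(sc, eecd);
	wm_nvm_eec_clock_lower(sc, eecd);
}
#endif
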
   13675 static void
   13676 wm_put_eecd(struct wm_softc *sc)
   13677 {
   13678 	uint32_t reg;
   13679 
   13680 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13681 		device_xname(sc->sc_dev), __func__));
   13682 
   13683 	/* Stop nvm */
   13684 	reg = CSR_READ(sc, WMREG_EECD);
   13685 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13686 		/* Pull CS high */
   13687 		reg |= EECD_CS;
   13688 		wm_nvm_eec_clock_lower(sc, &reg);
   13689 	} else {
   13690 		/* CS on Microwire is active-high */
   13691 		reg &= ~(EECD_CS | EECD_DI);
   13692 		CSR_WRITE(sc, WMREG_EECD, reg);
   13693 		wm_nvm_eec_clock_raise(sc, &reg);
   13694 		wm_nvm_eec_clock_lower(sc, &reg);
   13695 	}
   13696 
   13697 	reg = CSR_READ(sc, WMREG_EECD);
   13698 	reg &= ~EECD_EE_REQ;
   13699 	CSR_WRITE(sc, WMREG_EECD, reg);
   13700 
   13701 	return;
   13702 }
   13703 
   13704 /*
   13705  * Get hardware semaphore.
   13706  * Same as e1000_get_hw_semaphore_generic()
   13707  */
   13708 static int
   13709 wm_get_swsm_semaphore(struct wm_softc *sc)
   13710 {
   13711 	int32_t timeout;
   13712 	uint32_t swsm;
   13713 
   13714 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13715 		device_xname(sc->sc_dev), __func__));
   13716 	KASSERT(sc->sc_nvm_wordsize > 0);
   13717 
   13718 retry:
   13719 	/* Get the SW semaphore. */
   13720 	timeout = sc->sc_nvm_wordsize + 1;
   13721 	while (timeout) {
   13722 		swsm = CSR_READ(sc, WMREG_SWSM);
   13723 
   13724 		if ((swsm & SWSM_SMBI) == 0)
   13725 			break;
   13726 
   13727 		delay(50);
   13728 		timeout--;
   13729 	}
   13730 
   13731 	if (timeout == 0) {
   13732 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13733 			/*
   13734 			 * In rare circumstances, the SW semaphore may already
   13735 			 * be held unintentionally. Clear the semaphore once
   13736 			 * before giving up.
   13737 			 */
   13738 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13739 			wm_put_swsm_semaphore(sc);
   13740 			goto retry;
   13741 		}
   13742 		aprint_error_dev(sc->sc_dev,
   13743 		    "could not acquire SWSM SMBI\n");
   13744 		return 1;
   13745 	}
   13746 
   13747 	/* Get the FW semaphore. */
   13748 	timeout = sc->sc_nvm_wordsize + 1;
   13749 	while (timeout) {
   13750 		swsm = CSR_READ(sc, WMREG_SWSM);
   13751 		swsm |= SWSM_SWESMBI;
   13752 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13753 		/* If we managed to set the bit we got the semaphore. */
   13754 		swsm = CSR_READ(sc, WMREG_SWSM);
   13755 		if (swsm & SWSM_SWESMBI)
   13756 			break;
   13757 
   13758 		delay(50);
   13759 		timeout--;
   13760 	}
   13761 
   13762 	if (timeout == 0) {
   13763 		aprint_error_dev(sc->sc_dev,
   13764 		    "could not acquire SWSM SWESMBI\n");
   13765 		/* Release semaphores */
   13766 		wm_put_swsm_semaphore(sc);
   13767 		return 1;
   13768 	}
   13769 	return 0;
   13770 }
   13771 
   13772 /*
   13773  * Put hardware semaphore.
   13774  * Same as e1000_put_hw_semaphore_generic()
   13775  */
   13776 static void
   13777 wm_put_swsm_semaphore(struct wm_softc *sc)
   13778 {
   13779 	uint32_t swsm;
   13780 
   13781 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13782 		device_xname(sc->sc_dev), __func__));
   13783 
   13784 	swsm = CSR_READ(sc, WMREG_SWSM);
   13785 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13786 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13787 }
   13788 
   13789 /*
   13790  * Get SW/FW semaphore.
   13791  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13792  */
   13793 static int
   13794 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13795 {
   13796 	uint32_t swfw_sync;
   13797 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13798 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13799 	int timeout;
   13800 
   13801 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13802 		device_xname(sc->sc_dev), __func__));
   13803 
   13804 	if (sc->sc_type == WM_T_80003)
   13805 		timeout = 50;
   13806 	else
   13807 		timeout = 200;
   13808 
   13809 	while (timeout) {
   13810 		if (wm_get_swsm_semaphore(sc)) {
   13811 			aprint_error_dev(sc->sc_dev,
   13812 			    "%s: failed to get semaphore\n",
   13813 			    __func__);
   13814 			return 1;
   13815 		}
   13816 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13817 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13818 			swfw_sync |= swmask;
   13819 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13820 			wm_put_swsm_semaphore(sc);
   13821 			return 0;
   13822 		}
   13823 		wm_put_swsm_semaphore(sc);
   13824 		delay(5000);
   13825 		timeout--;
   13826 	}
   13827 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13828 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13829 	return 1;
   13830 }
   13831 
   13832 static void
   13833 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13834 {
   13835 	uint32_t swfw_sync;
   13836 
   13837 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13838 		device_xname(sc->sc_dev), __func__));
   13839 
   13840 	while (wm_get_swsm_semaphore(sc) != 0)
   13841 		continue;
   13842 
   13843 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13844 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13845 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13846 
   13847 	wm_put_swsm_semaphore(sc);
   13848 }
   13849 
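/*
 * Typical pairing of the SW/FW semaphore helpers above (sketch; see
 * wm_get_nvm_80003() below for a real caller):
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) != 0)
 *		return -1;
 *	... access the resource named by the mask ...
 *	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 */
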
   13850 static int
   13851 wm_get_nvm_80003(struct wm_softc *sc)
   13852 {
   13853 	int rv;
   13854 
   13855 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13856 		device_xname(sc->sc_dev), __func__));
   13857 
   13858 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13859 		aprint_error_dev(sc->sc_dev,
   13860 		    "%s: failed to get semaphore(SWFW)\n",
   13861 		    __func__);
   13862 		return rv;
   13863 	}
   13864 
   13865 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13866 	    && (rv = wm_get_eecd(sc)) != 0) {
   13867 		aprint_error_dev(sc->sc_dev,
   13868 		    "%s: failed to get semaphore(EECD)\n",
   13869 		    __func__);
   13870 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13871 		return rv;
   13872 	}
   13873 
   13874 	return 0;
   13875 }
   13876 
   13877 static void
   13878 wm_put_nvm_80003(struct wm_softc *sc)
   13879 {
   13880 
   13881 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13882 		device_xname(sc->sc_dev), __func__));
   13883 
   13884 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13885 		wm_put_eecd(sc);
   13886 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13887 }
   13888 
   13889 static int
   13890 wm_get_nvm_82571(struct wm_softc *sc)
   13891 {
   13892 	int rv;
   13893 
   13894 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13895 		device_xname(sc->sc_dev), __func__));
   13896 
   13897 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13898 		return rv;
   13899 
   13900 	switch (sc->sc_type) {
   13901 	case WM_T_82573:
   13902 		break;
   13903 	default:
   13904 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13905 			rv = wm_get_eecd(sc);
   13906 		break;
   13907 	}
   13908 
   13909 	if (rv != 0) {
   13910 		aprint_error_dev(sc->sc_dev,
   13911 		    "%s: failed to get semaphore\n",
   13912 		    __func__);
   13913 		wm_put_swsm_semaphore(sc);
   13914 	}
   13915 
   13916 	return rv;
   13917 }
   13918 
   13919 static void
   13920 wm_put_nvm_82571(struct wm_softc *sc)
   13921 {
   13922 
   13923 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13924 		device_xname(sc->sc_dev), __func__));
   13925 
   13926 	switch (sc->sc_type) {
   13927 	case WM_T_82573:
   13928 		break;
   13929 	default:
   13930 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13931 			wm_put_eecd(sc);
   13932 		break;
   13933 	}
   13934 
   13935 	wm_put_swsm_semaphore(sc);
   13936 }
   13937 
   13938 static int
   13939 wm_get_phy_82575(struct wm_softc *sc)
   13940 {
   13941 
   13942 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13943 		device_xname(sc->sc_dev), __func__));
   13944 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13945 }
   13946 
   13947 static void
   13948 wm_put_phy_82575(struct wm_softc *sc)
   13949 {
   13950 
   13951 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13952 		device_xname(sc->sc_dev), __func__));
   13953 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13954 }
   13955 
   13956 static int
   13957 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13958 {
   13959 	uint32_t ext_ctrl;
	int timeout;
   13961 
   13962 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13963 		device_xname(sc->sc_dev), __func__));
   13964 
   13965 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13966 	for (timeout = 0; timeout < 200; timeout++) {
   13967 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13968 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13969 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13970 
   13971 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13972 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13973 			return 0;
   13974 		delay(5000);
   13975 	}
   13976 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13977 	    device_xname(sc->sc_dev), ext_ctrl);
   13978 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13979 	return 1;
   13980 }
   13981 
   13982 static void
   13983 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13984 {
   13985 	uint32_t ext_ctrl;
   13986 
   13987 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13988 		device_xname(sc->sc_dev), __func__));
   13989 
   13990 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13991 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13992 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13993 
   13994 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13995 }
   13996 
   13997 static int
   13998 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13999 {
   14000 	uint32_t ext_ctrl;
   14001 	int timeout;
   14002 
   14003 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14004 		device_xname(sc->sc_dev), __func__));
   14005 	mutex_enter(sc->sc_ich_phymtx);
   14006 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14007 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14008 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14009 			break;
   14010 		delay(1000);
   14011 	}
   14012 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14013 		printf("%s: SW has already locked the resource\n",
   14014 		    device_xname(sc->sc_dev));
   14015 		goto out;
   14016 	}
   14017 
   14018 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14019 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14020 	for (timeout = 0; timeout < 1000; timeout++) {
   14021 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14022 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14023 			break;
   14024 		delay(1000);
   14025 	}
   14026 	if (timeout >= 1000) {
   14027 		printf("%s: failed to acquire semaphore\n",
   14028 		    device_xname(sc->sc_dev));
   14029 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14030 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14031 		goto out;
   14032 	}
   14033 	return 0;
   14034 
   14035 out:
   14036 	mutex_exit(sc->sc_ich_phymtx);
   14037 	return 1;
   14038 }
   14039 
   14040 static void
   14041 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14042 {
   14043 	uint32_t ext_ctrl;
   14044 
   14045 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14046 		device_xname(sc->sc_dev), __func__));
   14047 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14048 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14049 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14050 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14051 	} else {
   14052 		printf("%s: Semaphore unexpectedly released\n",
   14053 		    device_xname(sc->sc_dev));
   14054 	}
   14055 
   14056 	mutex_exit(sc->sc_ich_phymtx);
   14057 }
   14058 
   14059 static int
   14060 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14061 {
   14062 
   14063 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14064 		device_xname(sc->sc_dev), __func__));
   14065 	mutex_enter(sc->sc_ich_nvmmtx);
   14066 
   14067 	return 0;
   14068 }
   14069 
   14070 static void
   14071 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14072 {
   14073 
   14074 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14075 		device_xname(sc->sc_dev), __func__));
   14076 	mutex_exit(sc->sc_ich_nvmmtx);
   14077 }
   14078 
   14079 static int
   14080 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14081 {
   14082 	int i = 0;
   14083 	uint32_t reg;
   14084 
   14085 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14086 		device_xname(sc->sc_dev), __func__));
   14087 
   14088 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14089 	do {
   14090 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14091 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14092 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14093 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14094 			break;
   14095 		delay(2*1000);
   14096 		i++;
   14097 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14098 
   14099 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14100 		wm_put_hw_semaphore_82573(sc);
   14101 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14102 		    device_xname(sc->sc_dev));
   14103 		return -1;
   14104 	}
   14105 
   14106 	return 0;
   14107 }
   14108 
   14109 static void
   14110 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14111 {
   14112 	uint32_t reg;
   14113 
   14114 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14115 		device_xname(sc->sc_dev), __func__));
   14116 
   14117 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14118 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14119 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14120 }
   14121 
   14122 /*
   14123  * Management mode and power management related subroutines.
   14124  * BMC, AMT, suspend/resume and EEE.
   14125  */
   14126 
   14127 #ifdef WM_WOL
   14128 static int
   14129 wm_check_mng_mode(struct wm_softc *sc)
   14130 {
   14131 	int rv;
   14132 
   14133 	switch (sc->sc_type) {
   14134 	case WM_T_ICH8:
   14135 	case WM_T_ICH9:
   14136 	case WM_T_ICH10:
   14137 	case WM_T_PCH:
   14138 	case WM_T_PCH2:
   14139 	case WM_T_PCH_LPT:
   14140 	case WM_T_PCH_SPT:
   14141 	case WM_T_PCH_CNP:
   14142 		rv = wm_check_mng_mode_ich8lan(sc);
   14143 		break;
   14144 	case WM_T_82574:
   14145 	case WM_T_82583:
   14146 		rv = wm_check_mng_mode_82574(sc);
   14147 		break;
   14148 	case WM_T_82571:
   14149 	case WM_T_82572:
   14150 	case WM_T_82573:
   14151 	case WM_T_80003:
   14152 		rv = wm_check_mng_mode_generic(sc);
   14153 		break;
   14154 	default:
		/* nothing to do */
   14156 		rv = 0;
   14157 		break;
   14158 	}
   14159 
   14160 	return rv;
   14161 }
   14162 
   14163 static int
   14164 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14165 {
   14166 	uint32_t fwsm;
   14167 
   14168 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14169 
   14170 	if (((fwsm & FWSM_FW_VALID) != 0)
   14171 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14172 		return 1;
   14173 
   14174 	return 0;
   14175 }
   14176 
   14177 static int
   14178 wm_check_mng_mode_82574(struct wm_softc *sc)
   14179 {
   14180 	uint16_t data;
   14181 
   14182 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14183 
   14184 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14185 		return 1;
   14186 
   14187 	return 0;
   14188 }
   14189 
   14190 static int
   14191 wm_check_mng_mode_generic(struct wm_softc *sc)
   14192 {
   14193 	uint32_t fwsm;
   14194 
   14195 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14196 
   14197 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14198 		return 1;
   14199 
   14200 	return 0;
   14201 }
   14202 #endif /* WM_WOL */
   14203 
   14204 static int
   14205 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14206 {
   14207 	uint32_t manc, fwsm, factps;
   14208 
   14209 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14210 		return 0;
   14211 
   14212 	manc = CSR_READ(sc, WMREG_MANC);
   14213 
   14214 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14215 		device_xname(sc->sc_dev), manc));
   14216 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14217 		return 0;
   14218 
   14219 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14220 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14221 		factps = CSR_READ(sc, WMREG_FACTPS);
   14222 		if (((factps & FACTPS_MNGCG) == 0)
   14223 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14224 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14226 		uint16_t data;
   14227 
   14228 		factps = CSR_READ(sc, WMREG_FACTPS);
   14229 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14230 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14231 			device_xname(sc->sc_dev), factps, data));
   14232 		if (((factps & FACTPS_MNGCG) == 0)
   14233 		    && ((data & NVM_CFG2_MNGM_MASK)
   14234 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14235 			return 1;
   14236 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14237 	    && ((manc & MANC_ASF_EN) == 0))
   14238 		return 1;
   14239 
   14240 	return 0;
   14241 }
   14242 
   14243 static bool
   14244 wm_phy_resetisblocked(struct wm_softc *sc)
   14245 {
   14246 	bool blocked = false;
   14247 	uint32_t reg;
   14248 	int i = 0;
   14249 
   14250 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14251 		device_xname(sc->sc_dev), __func__));
   14252 
   14253 	switch (sc->sc_type) {
   14254 	case WM_T_ICH8:
   14255 	case WM_T_ICH9:
   14256 	case WM_T_ICH10:
   14257 	case WM_T_PCH:
   14258 	case WM_T_PCH2:
   14259 	case WM_T_PCH_LPT:
   14260 	case WM_T_PCH_SPT:
   14261 	case WM_T_PCH_CNP:
   14262 		do {
   14263 			reg = CSR_READ(sc, WMREG_FWSM);
   14264 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14265 				blocked = true;
   14266 				delay(10*1000);
   14267 				continue;
   14268 			}
   14269 			blocked = false;
   14270 		} while (blocked && (i++ < 30));
   14271 		return blocked;
   14272 		break;
   14273 	case WM_T_82571:
   14274 	case WM_T_82572:
   14275 	case WM_T_82573:
   14276 	case WM_T_82574:
   14277 	case WM_T_82583:
   14278 	case WM_T_80003:
   14279 		reg = CSR_READ(sc, WMREG_MANC);
   14280 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14281 			return true;
   14282 		else
   14283 			return false;
   14284 		break;
   14285 	default:
   14286 		/* no problem */
   14287 		break;
   14288 	}
   14289 
   14290 	return false;
   14291 }
   14292 
   14293 static void
   14294 wm_get_hw_control(struct wm_softc *sc)
   14295 {
   14296 	uint32_t reg;
   14297 
   14298 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14299 		device_xname(sc->sc_dev), __func__));
   14300 
   14301 	if (sc->sc_type == WM_T_82573) {
   14302 		reg = CSR_READ(sc, WMREG_SWSM);
   14303 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14304 	} else if (sc->sc_type >= WM_T_82571) {
   14305 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14306 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14307 	}
   14308 }
   14309 
   14310 static void
   14311 wm_release_hw_control(struct wm_softc *sc)
   14312 {
   14313 	uint32_t reg;
   14314 
   14315 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14316 		device_xname(sc->sc_dev), __func__));
   14317 
   14318 	if (sc->sc_type == WM_T_82573) {
   14319 		reg = CSR_READ(sc, WMREG_SWSM);
   14320 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14321 	} else if (sc->sc_type >= WM_T_82571) {
   14322 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14323 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14324 	}
   14325 }
   14326 
   14327 static void
   14328 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14329 {
   14330 	uint32_t reg;
   14331 
   14332 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14333 		device_xname(sc->sc_dev), __func__));
   14334 
   14335 	if (sc->sc_type < WM_T_PCH2)
   14336 		return;
   14337 
   14338 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14339 
   14340 	if (gate)
   14341 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14342 	else
   14343 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14344 
   14345 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14346 }
   14347 
   14348 static int
   14349 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14350 {
   14351 	uint32_t fwsm, reg;
   14352 	int rv = 0;
   14353 
   14354 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14355 		device_xname(sc->sc_dev), __func__));
   14356 
   14357 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14358 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14359 
   14360 	/* Disable ULP */
   14361 	wm_ulp_disable(sc);
   14362 
   14363 	/* Acquire PHY semaphore */
   14364 	rv = sc->phy.acquire(sc);
   14365 	if (rv != 0) {
   14366 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14367 		device_xname(sc->sc_dev), __func__));
   14368 		return -1;
   14369 	}
   14370 
   14371 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14372 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14373 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14374 	 */
   14375 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14376 	switch (sc->sc_type) {
   14377 	case WM_T_PCH_LPT:
   14378 	case WM_T_PCH_SPT:
   14379 	case WM_T_PCH_CNP:
   14380 		if (wm_phy_is_accessible_pchlan(sc))
   14381 			break;
   14382 
   14383 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14384 		 * forcing MAC to SMBus mode first.
   14385 		 */
   14386 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14387 		reg |= CTRL_EXT_FORCE_SMBUS;
   14388 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14389 #if 0
   14390 		/* XXX Isn't this required??? */
   14391 		CSR_WRITE_FLUSH(sc);
   14392 #endif
   14393 		/* Wait 50 milliseconds for MAC to finish any retries
   14394 		 * that it might be trying to perform from previous
   14395 		 * attempts to acknowledge any phy read requests.
   14396 		 */
   14397 		delay(50 * 1000);
   14398 		/* FALLTHROUGH */
   14399 	case WM_T_PCH2:
   14400 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14401 			break;
   14402 		/* FALLTHROUGH */
   14403 	case WM_T_PCH:
   14404 		if (sc->sc_type == WM_T_PCH)
   14405 			if ((fwsm & FWSM_FW_VALID) != 0)
   14406 				break;
   14407 
   14408 		if (wm_phy_resetisblocked(sc) == true) {
   14409 			printf("XXX reset is blocked(3)\n");
   14410 			break;
   14411 		}
   14412 
   14413 		/* Toggle LANPHYPC Value bit */
   14414 		wm_toggle_lanphypc_pch_lpt(sc);
   14415 
   14416 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14417 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14418 				break;
   14419 
   14420 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14421 			 * so ensure that the MAC is also out of SMBus mode
   14422 			 */
   14423 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14424 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14425 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14426 
   14427 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14428 				break;
   14429 			rv = -1;
   14430 		}
   14431 		break;
   14432 	default:
   14433 		break;
   14434 	}
   14435 
   14436 	/* Release semaphore */
   14437 	sc->phy.release(sc);
   14438 
   14439 	if (rv == 0) {
   14440 		/* Check to see if able to reset PHY.  Print error if not */
   14441 		if (wm_phy_resetisblocked(sc)) {
   14442 			printf("XXX reset is blocked(4)\n");
   14443 			goto out;
   14444 		}
   14445 
   14446 		/* Reset the PHY before any access to it.  Doing so, ensures
   14447 		 * that the PHY is in a known good state before we read/write
   14448 		 * PHY registers.  The generic reset is sufficient here,
   14449 		 * because we haven't determined the PHY type yet.
   14450 		 */
   14451 		if (wm_reset_phy(sc) != 0)
   14452 			goto out;
   14453 
		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, we
		 * just report it here (e1000e would return
		 * E1000E_BLK_PHY_RESET, as this is the condition the PHY
		 * is in).
		 */
   14460 		if (wm_phy_resetisblocked(sc))
   14461 			printf("XXX reset is blocked(4)\n");
   14462 	}
   14463 
   14464 out:
   14465 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14466 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14467 		delay(10*1000);
   14468 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14469 	}
   14470 
	return rv;
   14472 }
   14473 
   14474 static void
   14475 wm_init_manageability(struct wm_softc *sc)
   14476 {
   14477 
   14478 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14479 		device_xname(sc->sc_dev), __func__));
   14480 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14481 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14482 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14483 
   14484 		/* Disable hardware interception of ARP */
   14485 		manc &= ~MANC_ARP_EN;
   14486 
   14487 		/* Enable receiving management packets to the host */
   14488 		if (sc->sc_type >= WM_T_82571) {
   14489 			manc |= MANC_EN_MNG2HOST;
   14490 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14491 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14492 		}
   14493 
   14494 		CSR_WRITE(sc, WMREG_MANC, manc);
   14495 	}
   14496 }
   14497 
   14498 static void
   14499 wm_release_manageability(struct wm_softc *sc)
   14500 {
   14501 
   14502 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14503 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14504 
   14505 		manc |= MANC_ARP_EN;
   14506 		if (sc->sc_type >= WM_T_82571)
   14507 			manc &= ~MANC_EN_MNG2HOST;
   14508 
   14509 		CSR_WRITE(sc, WMREG_MANC, manc);
   14510 	}
   14511 }
   14512 
   14513 static void
   14514 wm_get_wakeup(struct wm_softc *sc)
   14515 {
   14516 
   14517 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14518 	switch (sc->sc_type) {
   14519 	case WM_T_82573:
   14520 	case WM_T_82583:
   14521 		sc->sc_flags |= WM_F_HAS_AMT;
   14522 		/* FALLTHROUGH */
   14523 	case WM_T_80003:
   14524 	case WM_T_82575:
   14525 	case WM_T_82576:
   14526 	case WM_T_82580:
   14527 	case WM_T_I350:
   14528 	case WM_T_I354:
   14529 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14530 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14531 		/* FALLTHROUGH */
   14532 	case WM_T_82541:
   14533 	case WM_T_82541_2:
   14534 	case WM_T_82547:
   14535 	case WM_T_82547_2:
   14536 	case WM_T_82571:
   14537 	case WM_T_82572:
   14538 	case WM_T_82574:
   14539 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14540 		break;
   14541 	case WM_T_ICH8:
   14542 	case WM_T_ICH9:
   14543 	case WM_T_ICH10:
   14544 	case WM_T_PCH:
   14545 	case WM_T_PCH2:
   14546 	case WM_T_PCH_LPT:
   14547 	case WM_T_PCH_SPT:
   14548 	case WM_T_PCH_CNP:
   14549 		sc->sc_flags |= WM_F_HAS_AMT;
   14550 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14551 		break;
   14552 	default:
   14553 		break;
   14554 	}
   14555 
   14556 	/* 1: HAS_MANAGE */
   14557 	if (wm_enable_mng_pass_thru(sc) != 0)
   14558 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14559 
	/*
	 * Note that the WOL flags are set after the EEPROM-related
	 * resetting is done.
	 */
   14564 }
   14565 
   14566 /*
   14567  * Unconfigure Ultra Low Power mode.
   14568  * Only for I217 and newer (see below).
   14569  */
   14570 static int
   14571 wm_ulp_disable(struct wm_softc *sc)
   14572 {
   14573 	uint32_t reg;
   14574 	uint16_t phyreg;
   14575 	int i = 0, rv = 0;
   14576 
   14577 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14578 		device_xname(sc->sc_dev), __func__));
   14579 	/* Exclude old devices */
   14580 	if ((sc->sc_type < WM_T_PCH_LPT)
   14581 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14582 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14583 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14584 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14585 		return 0;
   14586 
   14587 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14588 		/* Request ME un-configure ULP mode in the PHY */
   14589 		reg = CSR_READ(sc, WMREG_H2ME);
   14590 		reg &= ~H2ME_ULP;
   14591 		reg |= H2ME_ENFORCE_SETTINGS;
   14592 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14593 
   14594 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14595 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14596 			if (i++ == 30) {
   14597 				printf("%s timed out\n", __func__);
   14598 				return -1;
   14599 			}
   14600 			delay(10 * 1000);
   14601 		}
   14602 		reg = CSR_READ(sc, WMREG_H2ME);
   14603 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14604 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14605 
   14606 		return 0;
   14607 	}
   14608 
   14609 	/* Acquire semaphore */
   14610 	rv = sc->phy.acquire(sc);
   14611 	if (rv != 0) {
   14612 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14613 		device_xname(sc->sc_dev), __func__));
   14614 		return -1;
   14615 	}
   14616 
   14617 	/* Toggle LANPHYPC */
   14618 	wm_toggle_lanphypc_pch_lpt(sc);
   14619 
   14620 	/* Unforce SMBus mode in PHY */
   14621 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14622 	if (rv != 0) {
   14623 		uint32_t reg2;
   14624 
   14625 		printf("%s: Force SMBus first.\n", __func__);
   14626 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14627 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14628 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14629 		delay(50 * 1000);
   14630 
   14631 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14632 		    &phyreg);
   14633 		if (rv != 0)
   14634 			goto release;
   14635 	}
   14636 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14637 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14638 
   14639 	/* Unforce SMBus mode in MAC */
   14640 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14641 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14642 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14643 
   14644 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14645 	if (rv != 0)
   14646 		goto release;
   14647 	phyreg |= HV_PM_CTRL_K1_ENA;
   14648 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14649 
   14650 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14651 		&phyreg);
   14652 	if (rv != 0)
   14653 		goto release;
   14654 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14655 	    | I218_ULP_CONFIG1_STICKY_ULP
   14656 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14657 	    | I218_ULP_CONFIG1_WOL_HOST
   14658 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14659 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14660 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14661 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14662 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14663 	phyreg |= I218_ULP_CONFIG1_START;
   14664 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14665 
   14666 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14667 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14668 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14669 
   14670 release:
   14671 	/* Release semaphore */
   14672 	sc->phy.release(sc);
   14673 	wm_gmii_reset(sc);
   14674 	delay(50 * 1000);
   14675 
   14676 	return rv;
   14677 }
   14678 
   14679 /* WOL in the newer chipset interfaces (pchlan) */
   14680 static int
   14681 wm_enable_phy_wakeup(struct wm_softc *sc)
   14682 {
   14683 	device_t dev = sc->sc_dev;
   14684 	uint32_t mreg, moff;
   14685 	uint16_t wuce, wuc, wufc, preg;
   14686 	int i, rv;
   14687 
   14688 	KASSERT(sc->sc_type >= WM_T_PCH);
   14689 
   14690 	/* Copy MAC RARs to PHY RARs */
   14691 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14692 
   14693 	/* Activate PHY wakeup */
   14694 	rv = sc->phy.acquire(sc);
   14695 	if (rv != 0) {
   14696 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14697 		    __func__);
   14698 		return rv;
   14699 	}
   14700 
   14701 	/*
   14702 	 * Enable access to PHY wakeup registers.
   14703 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14704 	 */
   14705 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14706 	if (rv != 0) {
   14707 		device_printf(dev,
   14708 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14709 		goto release;
   14710 	}
   14711 
   14712 	/* Copy MAC MTA to PHY MTA */
   14713 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14714 		uint16_t lo, hi;
   14715 
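          		/*
          		 * Each 32-bit MTA word from the MAC is written as two
          		 * consecutive 16-bit BM_MTA PHY registers, low half
          		 * first.
          		 */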
   14716 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14717 		lo = (uint16_t)(mreg & 0xffff);
   14718 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14719 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14720 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14721 	}
   14722 
   14723 	/* Configure PHY Rx Control register */
   14724 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14725 	mreg = CSR_READ(sc, WMREG_RCTL);
   14726 	if (mreg & RCTL_UPE)
   14727 		preg |= BM_RCTL_UPE;
   14728 	if (mreg & RCTL_MPE)
   14729 		preg |= BM_RCTL_MPE;
   14730 	preg &= ~(BM_RCTL_MO_MASK);
   14731 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14732 	if (moff != 0)
   14733 		preg |= moff << BM_RCTL_MO_SHIFT;
   14734 	if (mreg & RCTL_BAM)
   14735 		preg |= BM_RCTL_BAM;
   14736 	if (mreg & RCTL_PMCF)
   14737 		preg |= BM_RCTL_PMCF;
   14738 	mreg = CSR_READ(sc, WMREG_CTRL);
   14739 	if (mreg & CTRL_RFCE)
   14740 		preg |= BM_RCTL_RFCE;
   14741 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14742 
   14743 	wuc = WUC_APME | WUC_PME_EN;
   14744 	wufc = WUFC_MAG;
   14745 	/* Enable PHY wakeup in MAC register */
   14746 	CSR_WRITE(sc, WMREG_WUC,
   14747 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14748 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14749 
   14750 	/* Configure and enable PHY wakeup in PHY registers */
   14751 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14752 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14753 
   14754 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14755 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14756 
   14757 release:
   14758 	sc->phy.release(sc);
   14759 
    14760 	return rv;
   14761 }
   14762 
   14763 /* Power down workaround on D3 */
   14764 static void
   14765 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14766 {
   14767 	uint32_t reg;
   14768 	uint16_t phyreg;
   14769 	int i;
   14770 
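          	/*
          	 * Try at most twice: if the shutdown setting does not read
          	 * back from the PHY on the first pass, issue a PHY reset and
          	 * repeat once.
          	 */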
   14771 	for (i = 0; i < 2; i++) {
   14772 		/* Disable link */
   14773 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14774 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14775 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14776 
   14777 		/*
   14778 		 * Call gig speed drop workaround on Gig disable before
   14779 		 * accessing any PHY registers
   14780 		 */
   14781 		if (sc->sc_type == WM_T_ICH8)
   14782 			wm_gig_downshift_workaround_ich8lan(sc);
   14783 
   14784 		/* Write VR power-down enable */
   14785 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14786 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14787 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14788 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14789 
   14790 		/* Read it back and test */
   14791 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14792 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14793 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14794 			break;
   14795 
   14796 		/* Issue PHY reset and repeat at most one more time */
   14797 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14798 	}
   14799 }
   14800 
   14801 /*
   14802  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14803  *  @sc: pointer to the HW structure
   14804  *
   14805  *  During S0 to Sx transition, it is possible the link remains at gig
   14806  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14807  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14808  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14809  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14810  *  needs to be written.
    14811  *  Parts that support (and are linked to a partner which supports) EEE in
   14812  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14813  *  than 10Mbps w/o EEE.
   14814  */
   14815 static void
   14816 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14817 {
   14818 	device_t dev = sc->sc_dev;
   14819 	struct ethercom *ec = &sc->sc_ethercom;
   14820 	uint32_t phy_ctrl;
   14821 	int rv;
   14822 
   14823 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14824 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14825 
   14826 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14827 
   14828 	if (sc->sc_phytype == WMPHY_I217) {
   14829 		uint16_t devid = sc->sc_pcidevid;
   14830 
   14831 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14832 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14833 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14834 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14835 		    (sc->sc_type >= WM_T_PCH_SPT))
   14836 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14837 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14838 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14839 
   14840 		if (sc->phy.acquire(sc) != 0)
   14841 			goto out;
   14842 
   14843 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14844 			uint16_t eee_advert;
   14845 
   14846 			rv = wm_read_emi_reg_locked(dev,
   14847 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14848 			if (rv)
   14849 				goto release;
   14850 
   14851 			/*
   14852 			 * Disable LPLU if both link partners support 100BaseT
   14853 			 * EEE and 100Full is advertised on both ends of the
   14854 			 * link, and enable Auto Enable LPI since there will
   14855 			 * be no driver to enable LPI while in Sx.
   14856 			 */
   14857 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14858 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14859 				uint16_t anar, phy_reg;
   14860 
   14861 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14862 				    &anar);
   14863 				if (anar & ANAR_TX_FD) {
   14864 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14865 					    PHY_CTRL_NOND0A_LPLU);
   14866 
   14867 					/* Set Auto Enable LPI after link up */
   14868 					sc->phy.readreg_locked(dev, 2,
   14869 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14870 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14871 					sc->phy.writereg_locked(dev, 2,
   14872 					    I217_LPI_GPIO_CTRL, phy_reg);
   14873 				}
   14874 			}
   14875 		}
   14876 
   14877 		/*
   14878 		 * For i217 Intel Rapid Start Technology support,
   14879 		 * when the system is going into Sx and no manageability engine
   14880 		 * is present, the driver must configure proxy to reset only on
   14881 		 * power good.	LPI (Low Power Idle) state must also reset only
   14882 		 * on power good, as well as the MTA (Multicast table array).
   14883 		 * The SMBus release must also be disabled on LCD reset.
   14884 		 */
   14885 
   14886 		/*
   14887 		 * Enable MTA to reset for Intel Rapid Start Technology
   14888 		 * Support
   14889 		 */
   14890 
   14891 release:
   14892 		sc->phy.release(sc);
   14893 	}
   14894 out:
   14895 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14896 
   14897 	if (sc->sc_type == WM_T_ICH8)
   14898 		wm_gig_downshift_workaround_ich8lan(sc);
   14899 
   14900 	if (sc->sc_type >= WM_T_PCH) {
   14901 		wm_oem_bits_config_ich8lan(sc, false);
   14902 
   14903 		/* Reset PHY to activate OEM bits on 82577/8 */
   14904 		if (sc->sc_type == WM_T_PCH)
   14905 			wm_reset_phy(sc);
   14906 
   14907 		if (sc->phy.acquire(sc) != 0)
   14908 			return;
   14909 		wm_write_smbus_addr(sc);
   14910 		sc->phy.release(sc);
   14911 	}
   14912 }
   14913 
   14914 /*
   14915  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14916  *  @sc: pointer to the HW structure
   14917  *
   14918  *  During Sx to S0 transitions on non-managed devices or managed devices
   14919  *  on which PHY resets are not blocked, if the PHY registers cannot be
   14920  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   14921  *  the PHY.
   14922  *  On i217, setup Intel Rapid Start Technology.
   14923  */
   14924 static int
   14925 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14926 {
   14927 	device_t dev = sc->sc_dev;
   14928 	int rv;
   14929 
   14930 	if (sc->sc_type < WM_T_PCH2)
   14931 		return 0;
   14932 
   14933 	rv = wm_init_phy_workarounds_pchlan(sc);
   14934 	if (rv != 0)
   14935 		return -1;
   14936 
    14937 	/* For i217 Intel Rapid Start Technology support: when the system
    14938 	 * is transitioning from Sx and no manageability engine is present,
    14939 	 * configure SMBus to restore on reset, disable proxy, and enable
   14940 	 * the reset on MTA (Multicast table array).
   14941 	 */
   14942 	if (sc->sc_phytype == WMPHY_I217) {
   14943 		uint16_t phy_reg;
   14944 
   14945 		if (sc->phy.acquire(sc) != 0)
   14946 			return -1;
   14947 
   14948 		/* Clear Auto Enable LPI after link up */
   14949 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14950 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14951 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14952 
   14953 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14954 			/* Restore clear on SMB if no manageability engine
   14955 			 * is present
   14956 			 */
   14957 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   14958 			    &phy_reg);
   14959 			if (rv != 0)
   14960 				goto release;
   14961 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14962 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14963 
   14964 			/* Disable Proxy */
   14965 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14966 		}
   14967 		/* Enable reset on MTA */
    14968 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14969 		if (rv != 0)
   14970 			goto release;
   14971 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14972 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14973 
   14974 release:
   14975 		sc->phy.release(sc);
   14976 		return rv;
   14977 	}
   14978 
   14979 	return 0;
   14980 }
   14981 
   14982 static void
   14983 wm_enable_wakeup(struct wm_softc *sc)
   14984 {
   14985 	uint32_t reg, pmreg;
   14986 	pcireg_t pmode;
   14987 	int rv = 0;
   14988 
   14989 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14990 		device_xname(sc->sc_dev), __func__));
   14991 
   14992 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14993 	    &pmreg, NULL) == 0)
   14994 		return;
   14995 
   14996 	if ((sc->sc_flags & WM_F_WOL) == 0)
   14997 		goto pme;
   14998 
   14999 	/* Advertise the wakeup capability */
   15000 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15001 	    | CTRL_SWDPIN(3));
   15002 
   15003 	/* Keep the laser running on fiber adapters */
   15004 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15005 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15006 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15007 		reg |= CTRL_EXT_SWDPIN(3);
   15008 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15009 	}
   15010 
   15011 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15012 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15013 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15014 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15015 		wm_suspend_workarounds_ich8lan(sc);
   15016 
   15017 #if 0	/* for the multicast packet */
   15018 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15019 	reg |= WUFC_MC;
   15020 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15021 #endif
   15022 
   15023 	if (sc->sc_type >= WM_T_PCH) {
   15024 		rv = wm_enable_phy_wakeup(sc);
   15025 		if (rv != 0)
   15026 			goto pme;
   15027 	} else {
   15028 		/* Enable wakeup by the MAC */
   15029 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   15030 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15031 	}
   15032 
   15033 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15034 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15035 		|| (sc->sc_type == WM_T_PCH2))
   15036 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15037 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15038 
   15039 pme:
   15040 	/* Request PME */
   15041 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15042 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15043 		/* For WOL */
   15044 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15045 	} else {
   15046 		/* Disable WOL */
   15047 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15048 	}
   15049 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15050 }
   15051 
   15052 /* Disable ASPM L0s and/or L1 for workaround */
   15053 static void
   15054 wm_disable_aspm(struct wm_softc *sc)
   15055 {
   15056 	pcireg_t reg, mask = 0;
    15057 	const char *str = "";
   15058 
   15059 	/*
    15060 	 * Only for PCIe devices which have the PCIe capability in the PCI
    15061 	 * config space.
   15062 	 */
   15063 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15064 		return;
   15065 
   15066 	switch (sc->sc_type) {
   15067 	case WM_T_82571:
   15068 	case WM_T_82572:
   15069 		/*
   15070 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15071 		 * State Power management L1 State (ASPM L1).
   15072 		 */
   15073 		mask = PCIE_LCSR_ASPM_L1;
   15074 		str = "L1 is";
   15075 		break;
   15076 	case WM_T_82573:
   15077 	case WM_T_82574:
   15078 	case WM_T_82583:
   15079 		/*
   15080 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15081 		 *
    15082 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15083 		 * some chipsets.  The documents for the 82574 and 82583 say
    15084 		 * that disabling L0s on those specific chipsets is
    15085 		 * sufficient, but we follow what the Intel em driver does.
   15086 		 *
   15087 		 * References:
   15088 		 * Errata 8 of the Specification Update of i82573.
   15089 		 * Errata 20 of the Specification Update of i82574.
   15090 		 * Errata 9 of the Specification Update of i82583.
   15091 		 */
   15092 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15093 		str = "L0s and L1 are";
   15094 		break;
   15095 	default:
   15096 		return;
   15097 	}
   15098 
   15099 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15100 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15101 	reg &= ~mask;
   15102 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15103 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15104 
   15105 	/* Print only in wm_attach() */
   15106 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15107 		aprint_verbose_dev(sc->sc_dev,
   15108 		    "ASPM %s disabled to workaround the errata.\n", str);
   15109 }
   15110 
   15111 /* LPLU */
   15112 
   15113 static void
   15114 wm_lplu_d0_disable(struct wm_softc *sc)
   15115 {
   15116 	struct mii_data *mii = &sc->sc_mii;
   15117 	uint32_t reg;
   15118 	uint16_t phyval;
   15119 
   15120 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15121 		device_xname(sc->sc_dev), __func__));
   15122 
   15123 	if (sc->sc_phytype == WMPHY_IFE)
   15124 		return;
   15125 
   15126 	switch (sc->sc_type) {
   15127 	case WM_T_82571:
   15128 	case WM_T_82572:
   15129 	case WM_T_82573:
   15130 	case WM_T_82575:
   15131 	case WM_T_82576:
   15132 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15133 		phyval &= ~PMR_D0_LPLU;
   15134 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15135 		break;
   15136 	case WM_T_82580:
   15137 	case WM_T_I350:
   15138 	case WM_T_I210:
   15139 	case WM_T_I211:
   15140 		reg = CSR_READ(sc, WMREG_PHPM);
   15141 		reg &= ~PHPM_D0A_LPLU;
   15142 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15143 		break;
   15144 	case WM_T_82574:
   15145 	case WM_T_82583:
   15146 	case WM_T_ICH8:
   15147 	case WM_T_ICH9:
   15148 	case WM_T_ICH10:
   15149 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15150 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15151 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15152 		CSR_WRITE_FLUSH(sc);
   15153 		break;
   15154 	case WM_T_PCH:
   15155 	case WM_T_PCH2:
   15156 	case WM_T_PCH_LPT:
   15157 	case WM_T_PCH_SPT:
   15158 	case WM_T_PCH_CNP:
   15159 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15160 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15161 		if (wm_phy_resetisblocked(sc) == false)
   15162 			phyval |= HV_OEM_BITS_ANEGNOW;
   15163 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15164 		break;
   15165 	default:
   15166 		break;
   15167 	}
   15168 }
   15169 
   15170 /* EEE */
   15171 
   15172 static int
   15173 wm_set_eee_i350(struct wm_softc *sc)
   15174 {
   15175 	struct ethercom *ec = &sc->sc_ethercom;
   15176 	uint32_t ipcnfg, eeer;
   15177 	uint32_t ipcnfg_mask
   15178 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15179 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15180 
   15181 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15182 	eeer = CSR_READ(sc, WMREG_EEER);
   15183 
   15184 	/* enable or disable per user setting */
   15185 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15186 		ipcnfg |= ipcnfg_mask;
   15187 		eeer |= eeer_mask;
   15188 	} else {
   15189 		ipcnfg &= ~ipcnfg_mask;
   15190 		eeer &= ~eeer_mask;
   15191 	}
   15192 
   15193 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15194 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15195 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15196 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15197 
   15198 	return 0;
   15199 }
   15200 
   15201 static int
   15202 wm_set_eee_pchlan(struct wm_softc *sc)
   15203 {
   15204 	device_t dev = sc->sc_dev;
   15205 	struct ethercom *ec = &sc->sc_ethercom;
   15206 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15207 	int rv = 0;
   15208 
   15209 	switch (sc->sc_phytype) {
   15210 	case WMPHY_82579:
   15211 		lpa = I82579_EEE_LP_ABILITY;
   15212 		pcs_status = I82579_EEE_PCS_STATUS;
   15213 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15214 		break;
   15215 	case WMPHY_I217:
   15216 		lpa = I217_EEE_LP_ABILITY;
   15217 		pcs_status = I217_EEE_PCS_STATUS;
   15218 		adv_addr = I217_EEE_ADVERTISEMENT;
   15219 		break;
   15220 	default:
   15221 		return 0;
   15222 	}
   15223 
   15224 	if (sc->phy.acquire(sc)) {
   15225 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15226 		return 0;
   15227 	}
   15228 
   15229 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15230 	if (rv != 0)
   15231 		goto release;
   15232 
   15233 	/* Clear bits that enable EEE in various speeds */
   15234 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15235 
   15236 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15237 		/* Save off link partner's EEE ability */
   15238 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15239 		if (rv != 0)
   15240 			goto release;
   15241 
   15242 		/* Read EEE advertisement */
   15243 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15244 			goto release;
   15245 
   15246 		/*
   15247 		 * Enable EEE only for speeds in which the link partner is
   15248 		 * EEE capable and for which we advertise EEE.
   15249 		 */
   15250 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15251 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15252 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15253 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15254 			if ((data & ANLPAR_TX_FD) != 0)
   15255 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15256 			else {
   15257 				/*
   15258 				 * EEE is not supported in 100Half, so ignore
   15259 				 * partner's EEE in 100 ability if full-duplex
   15260 				 * is not advertised.
   15261 				 */
   15262 				sc->eee_lp_ability
   15263 				    &= ~AN_EEEADVERT_100_TX;
   15264 			}
   15265 		}
   15266 	}
   15267 
   15268 	if (sc->sc_phytype == WMPHY_82579) {
   15269 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15270 		if (rv != 0)
   15271 			goto release;
   15272 
   15273 		data &= ~I82579_LPI_PLL_SHUT_100;
   15274 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15275 	}
   15276 
   15277 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15278 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15279 		goto release;
   15280 
   15281 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15282 release:
   15283 	sc->phy.release(sc);
   15284 
   15285 	return rv;
   15286 }
   15287 
   15288 static int
   15289 wm_set_eee(struct wm_softc *sc)
   15290 {
   15291 	struct ethercom *ec = &sc->sc_ethercom;
   15292 
   15293 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15294 		return 0;
   15295 
   15296 	if (sc->sc_type == WM_T_I354) {
   15297 		/* I354 uses an external PHY */
   15298 		return 0; /* not yet */
   15299 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15300 		return wm_set_eee_i350(sc);
   15301 	else if (sc->sc_type >= WM_T_PCH2)
   15302 		return wm_set_eee_pchlan(sc);
   15303 
   15304 	return 0;
   15305 }
   15306 
   15307 /*
   15308  * Workarounds (mainly PHY related).
   15309  * Basically, PHY's workarounds are in the PHY drivers.
   15310  */
   15311 
   15312 /* Work-around for 82566 Kumeran PCS lock loss */
   15313 static int
   15314 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15315 {
   15316 	struct mii_data *mii = &sc->sc_mii;
   15317 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15318 	int i, reg, rv;
   15319 	uint16_t phyreg;
   15320 
   15321 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15322 		device_xname(sc->sc_dev), __func__));
   15323 
   15324 	/* If the link is not up, do nothing */
   15325 	if ((status & STATUS_LU) == 0)
   15326 		return 0;
   15327 
    15328 	/* Nothing to do if the link speed is other than 1Gbps */
   15329 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15330 		return 0;
   15331 
   15332 	for (i = 0; i < 10; i++) {
   15333 		/* read twice */
   15334 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15335 		if (rv != 0)
   15336 			return rv;
   15337 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15338 		if (rv != 0)
   15339 			return rv;
   15340 
   15341 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15342 			goto out;	/* GOOD! */
   15343 
   15344 		/* Reset the PHY */
   15345 		wm_reset_phy(sc);
   15346 		delay(5*1000);
   15347 	}
   15348 
   15349 	/* Disable GigE link negotiation */
   15350 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15351 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15352 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15353 
   15354 	/*
   15355 	 * Call gig speed drop workaround on Gig disable before accessing
   15356 	 * any PHY registers.
   15357 	 */
   15358 	wm_gig_downshift_workaround_ich8lan(sc);
   15359 
   15360 out:
   15361 	return 0;
   15362 }
   15363 
   15364 /*
   15365  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15366  *  @sc: pointer to the HW structure
   15367  *
   15368  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15369  *  LPLU, Gig disable, MDIC PHY reset):
   15370  *    1) Set Kumeran Near-end loopback
   15371  *    2) Clear Kumeran Near-end loopback
   15372  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15373  */
   15374 static void
   15375 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15376 {
   15377 	uint16_t kmreg;
   15378 
   15379 	/* Only for igp3 */
   15380 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15381 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15382 			return;
   15383 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15384 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15385 			return;
   15386 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15387 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15388 	}
   15389 }
   15390 
   15391 /*
   15392  * Workaround for pch's PHYs
   15393  * XXX should be moved to new PHY driver?
   15394  */
   15395 static int
   15396 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15397 {
   15398 	device_t dev = sc->sc_dev;
   15399 	uint16_t phy_data;
   15400 	int rv;
   15401 
   15402 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15403 		device_xname(dev), __func__));
   15404 	KASSERT(sc->sc_type == WM_T_PCH);
   15405 
   15406 	if (sc->sc_phytype == WMPHY_82577)
   15407 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15408 			return rv;
   15409 
   15410 	/* XXX (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   15411 
    15412 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15413 
   15414 	/* 82578 */
   15415 	if (sc->sc_phytype == WMPHY_82578) {
   15416 		struct mii_softc *child;
   15417 
   15418 		/*
   15419 		 * Return registers to default by doing a soft reset then
   15420 		 * writing 0x3140 to the control register
   15421 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15422 		 */
   15423 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15424 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   15425 			PHY_RESET(child);
   15426 			rv = sc->sc_mii.mii_writereg(dev, 2, MII_BMCR,
   15427 			    0x3140);
   15428 			if (rv != 0)
   15429 				return rv;
   15430 		}
   15431 	}
   15432 
   15433 	/* Select page 0 */
   15434 	if ((rv = sc->phy.acquire(sc)) != 0)
   15435 		return rv;
   15436 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15437 	sc->phy.release(sc);
   15438 	if (rv != 0)
   15439 		return rv;
   15440 
   15441 	/*
   15442 	 * Configure the K1 Si workaround during phy reset assuming there is
   15443 	 * link so that it disables K1 if link is in 1Gbps.
   15444 	 */
   15445 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15446 		return rv;
   15447 
   15448 	/* Workaround for link disconnects on a busy hub in half duplex */
   15449 	rv = sc->phy.acquire(sc);
   15450 	if (rv)
   15451 		return rv;
   15452 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15453 	if (rv)
   15454 		goto release;
   15455 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15456 	    phy_data & 0x00ff);
   15457 	if (rv)
   15458 		goto release;
   15459 
   15460 	/* set MSE higher to enable link to stay up when noise is high */
   15461 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15462 release:
   15463 	sc->phy.release(sc);
   15464 
   15465 	return rv;
   15468 }
   15469 
   15470 /*
   15471  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15472  *  @sc:   pointer to the HW structure
   15473  */
   15474 static void
   15475 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15476 {
   15477 	device_t dev = sc->sc_dev;
   15478 	uint32_t mac_reg;
   15479 	uint16_t i, wuce;
   15480 	int count;
   15481 
   15482 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15483 		device_xname(sc->sc_dev), __func__));
   15484 
   15485 	if (sc->phy.acquire(sc) != 0)
   15486 		return;
   15487 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15488 		goto release;
   15489 
   15490 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15491 	count = wm_rar_count(sc);
   15492 	for (i = 0; i < count; i++) {
   15493 		uint16_t lo, hi;
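          		/*
          		 * RAL is split into BM_RAR_L/BM_RAR_M; of RAH, the low
          		 * 16 bits go to BM_RAR_H and only the Address Valid bit
          		 * (RAL_AV) is carried into BM_RAR_CTRL.
          		 */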
   15494 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15495 		lo = (uint16_t)(mac_reg & 0xffff);
   15496 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15497 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15498 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15499 
   15500 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15501 		lo = (uint16_t)(mac_reg & 0xffff);
   15502 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15503 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15504 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15505 	}
   15506 
   15507 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15508 
   15509 release:
   15510 	sc->phy.release(sc);
   15511 }
   15512 
   15513 /*
   15514  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15515  *  done after every PHY reset.
   15516  */
   15517 static int
   15518 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15519 {
   15520 	device_t dev = sc->sc_dev;
   15521 	int rv;
   15522 
   15523 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15524 		device_xname(dev), __func__));
   15525 	KASSERT(sc->sc_type == WM_T_PCH2);
   15526 
   15527 	/* Set MDIO slow mode before any other MDIO access */
   15528 	rv = wm_set_mdio_slow_mode_hv(sc);
   15529 	if (rv != 0)
   15530 		return rv;
   15531 
   15532 	rv = sc->phy.acquire(sc);
   15533 	if (rv != 0)
   15534 		return rv;
   15535 	/* set MSE higher to enable link to stay up when noise is high */
   15536 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15537 	if (rv != 0)
   15538 		goto release;
   15539 	/* drop link after 5 times MSE threshold was reached */
   15540 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15541 release:
   15542 	sc->phy.release(sc);
   15543 
   15544 	return rv;
   15545 }
   15546 
   15547 /**
   15548  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15549  *  @link: link up bool flag
   15550  *
   15551  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
    15552  *  preventing further DMA write requests.  Work around the issue by disabling
    15553  *  the de-assertion of the clock request when in 1Gbps mode.
   15554  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15555  *  speeds in order to avoid Tx hangs.
   15556  **/
   15557 static int
   15558 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15559 {
   15560 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15561 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15562 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
    15563 	uint16_t phyreg;
          	int rv;
   15564 
   15565 	if (link && (speed == STATUS_SPEED_1000)) {
    15566 		if ((rv = sc->phy.acquire(sc)) != 0)
          			return rv;
    15567 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
    15568 		    &phyreg);
   15569 		if (rv != 0)
   15570 			goto release;
   15571 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15572 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15573 		if (rv != 0)
   15574 			goto release;
   15575 		delay(20);
   15576 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15577 
   15578 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15579 		    &phyreg);
   15580 release:
   15581 		sc->phy.release(sc);
   15582 		return rv;
   15583 	}
   15584 
   15585 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15586 
   15587 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15588 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15589 	    || !link
   15590 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15591 		goto update_fextnvm6;
   15592 
   15593 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15594 
   15595 	/* Clear link status transmit timeout */
   15596 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15597 	if (speed == STATUS_SPEED_100) {
   15598 		/* Set inband Tx timeout to 5x10us for 100Half */
   15599 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15600 
   15601 		/* Do not extend the K1 entry latency for 100Half */
   15602 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15603 	} else {
   15604 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15605 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15606 
   15607 		/* Extend the K1 entry latency for 10 Mbps */
   15608 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15609 	}
   15610 
   15611 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15612 
   15613 update_fextnvm6:
   15614 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15615 	return 0;
   15616 }
   15617 
   15618 /*
   15619  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15620  *  @sc:   pointer to the HW structure
   15621  *  @link: link up bool flag
   15622  *
   15623  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    15624  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15625  *  If link is down, the function will restore the default K1 setting located
   15626  *  in the NVM.
   15627  */
   15628 static int
   15629 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15630 {
   15631 	int k1_enable = sc->sc_nvm_k1_enabled;
   15632 
   15633 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15634 		device_xname(sc->sc_dev), __func__));
   15635 
   15636 	if (sc->phy.acquire(sc) != 0)
   15637 		return -1;
   15638 
   15639 	if (link) {
   15640 		k1_enable = 0;
   15641 
   15642 		/* Link stall fix for link up */
   15643 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15644 		    0x0100);
   15645 	} else {
   15646 		/* Link stall fix for link down */
   15647 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15648 		    0x4100);
   15649 	}
   15650 
   15651 	wm_configure_k1_ich8lan(sc, k1_enable);
   15652 	sc->phy.release(sc);
   15653 
   15654 	return 0;
   15655 }
   15656 
   15657 /*
   15658  *  wm_k1_workaround_lv - K1 Si workaround
   15659  *  @sc:   pointer to the HW structure
   15660  *
   15661  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   15662  *  Disable K1 for 1000 and 100 speeds
   15663  */
   15664 static int
   15665 wm_k1_workaround_lv(struct wm_softc *sc)
   15666 {
   15667 	uint32_t reg;
   15668 	uint16_t phyreg;
   15669 	int rv;
   15670 
   15671 	if (sc->sc_type != WM_T_PCH2)
   15672 		return 0;
   15673 
   15674 	/* Set K1 beacon duration based on 10Mbps speed */
   15675 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15676 	if (rv != 0)
   15677 		return rv;
   15678 
   15679 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15680 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15681 		if (phyreg &
   15682 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    15683 			/* LV 1G/100 packet drop issue workaround */
   15684 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15685 			    &phyreg);
   15686 			if (rv != 0)
   15687 				return rv;
   15688 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15689 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15690 			    phyreg);
   15691 			if (rv != 0)
   15692 				return rv;
   15693 		} else {
   15694 			/* For 10Mbps */
   15695 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15696 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15697 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15698 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15699 		}
   15700 	}
   15701 
   15702 	return 0;
   15703 }
   15704 
   15705 /*
   15706  *  wm_link_stall_workaround_hv - Si workaround
   15707  *  @sc: pointer to the HW structure
   15708  *
   15709  *  This function works around a Si bug where the link partner can get
   15710  *  a link up indication before the PHY does. If small packets are sent
   15711  *  by the link partner they can be placed in the packet buffer without
   15712  *  being properly accounted for by the PHY and will stall preventing
   15713  *  further packets from being received.  The workaround is to clear the
   15714  *  packet buffer after the PHY detects link up.
   15715  */
   15716 static int
   15717 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15718 {
   15719 	uint16_t phyreg;
   15720 
   15721 	if (sc->sc_phytype != WMPHY_82578)
   15722 		return 0;
   15723 
    15724 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15725 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15726 	if ((phyreg & BMCR_LOOP) != 0)
   15727 		return 0;
   15728 
   15729 	/* check if link is up and at 1Gbps */
   15730 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15731 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15732 	    | BM_CS_STATUS_SPEED_MASK;
   15733 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15734 		| BM_CS_STATUS_SPEED_1000))
   15735 		return 0;
   15736 
   15737 	delay(200 * 1000);	/* XXX too big */
   15738 
    15739 	/* Flush the packets in the FIFO buffer */
   15740 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15741 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15742 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15743 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15744 
   15745 	return 0;
   15746 }
   15747 
   15748 static int
   15749 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15750 {
   15751 	int rv;
   15752 	uint16_t reg;
   15753 
   15754 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15755 	if (rv != 0)
   15756 		return rv;
   15757 
    15758 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15759 	    reg | HV_KMRN_MDIO_SLOW);
   15760 }
   15761 
   15762 /*
   15763  *  wm_configure_k1_ich8lan - Configure K1 power state
   15764  *  @sc: pointer to the HW structure
   15765  *  @enable: K1 state to configure
   15766  *
   15767  *  Configure the K1 power state based on the provided parameter.
   15768  *  Assumes semaphore already acquired.
   15769  */
   15770 static void
   15771 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15772 {
   15773 	uint32_t ctrl, ctrl_ext, tmp;
   15774 	uint16_t kmreg;
   15775 	int rv;
   15776 
   15777 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15778 
   15779 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15780 	if (rv != 0)
   15781 		return;
   15782 
   15783 	if (k1_enable)
   15784 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15785 	else
   15786 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15787 
   15788 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15789 	if (rv != 0)
   15790 		return;
   15791 
   15792 	delay(20);
   15793 
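          	/*
          	 * Briefly force the MAC speed setting (CTRL_EXT_SPD_BYPS makes
          	 * the forced CTRL speed bits take effect immediately),
          	 * presumably so that the new K1 configuration latches, then
          	 * restore the original CTRL/CTRL_EXT values.
          	 */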
   15794 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15795 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15796 
   15797 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15798 	tmp |= CTRL_FRCSPD;
   15799 
   15800 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15801 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15802 	CSR_WRITE_FLUSH(sc);
   15803 	delay(20);
   15804 
   15805 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15806 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15807 	CSR_WRITE_FLUSH(sc);
   15808 	delay(20);
   15809 
   15810 	return;
   15811 }
   15812 
   15813 /* special case - for 82575 - need to do manual init ... */
   15814 static void
   15815 wm_reset_init_script_82575(struct wm_softc *sc)
   15816 {
   15817 	/*
    15818 	 * Remark: this is untested code - we have no board without EEPROM;
    15819 	 * same setup as mentioned in the FreeBSD driver for the i82575.
   15820 	 */
   15821 
   15822 	/* SerDes configuration via SERDESCTRL */
   15823 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15824 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15825 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15826 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15827 
   15828 	/* CCM configuration via CCMCTL register */
   15829 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15830 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15831 
   15832 	/* PCIe lanes configuration */
   15833 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15834 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15835 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15836 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15837 
   15838 	/* PCIe PLL Configuration */
   15839 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15840 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15841 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15842 }
   15843 
   15844 static void
   15845 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15846 {
   15847 	uint32_t reg;
   15848 	uint16_t nvmword;
   15849 	int rv;
   15850 
   15851 	if (sc->sc_type != WM_T_82580)
   15852 		return;
   15853 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15854 		return;
   15855 
   15856 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15857 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15858 	if (rv != 0) {
   15859 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15860 		    __func__);
   15861 		return;
   15862 	}
   15863 
   15864 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15865 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15866 		reg |= MDICNFG_DEST;
   15867 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15868 		reg |= MDICNFG_COM_MDIO;
   15869 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15870 }
   15871 
   15872 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15873 
   15874 static bool
   15875 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15876 {
   15877 	uint32_t reg;
   15878 	uint16_t id1, id2;
   15879 	int i, rv;
   15880 
   15881 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15882 		device_xname(sc->sc_dev), __func__));
   15883 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15884 
   15885 	id1 = id2 = 0xffff;
   15886 	for (i = 0; i < 2; i++) {
   15887 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15888 		    &id1);
   15889 		if ((rv != 0) || MII_INVALIDID(id1))
   15890 			continue;
   15891 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15892 		    &id2);
   15893 		if ((rv != 0) || MII_INVALIDID(id2))
   15894 			continue;
   15895 		break;
   15896 	}
   15897 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15898 		goto out;
   15899 
   15900 	/*
   15901 	 * In case the PHY needs to be in mdio slow mode,
   15902 	 * set slow mode and try to get the PHY id again.
   15903 	 */
   15904 	rv = 0;
   15905 	if (sc->sc_type < WM_T_PCH_LPT) {
   15906 		sc->phy.release(sc);
   15907 		wm_set_mdio_slow_mode_hv(sc);
   15908 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15909 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15910 		sc->phy.acquire(sc);
   15911 	}
   15912 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15913 		printf("XXX return with false\n");
   15914 		return false;
   15915 	}
   15916 out:
   15917 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15918 		/* Only unforce SMBus if ME is not active */
   15919 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15920 			uint16_t phyreg;
   15921 
   15922 			/* Unforce SMBus mode in PHY */
   15923 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15924 			    CV_SMB_CTRL, &phyreg);
   15925 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15926 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15927 			    CV_SMB_CTRL, phyreg);
   15928 
   15929 			/* Unforce SMBus mode in MAC */
   15930 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15931 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15932 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15933 		}
   15934 	}
   15935 	return true;
   15936 }
   15937 
   15938 static void
   15939 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15940 {
   15941 	uint32_t reg;
   15942 	int i;
   15943 
   15944 	/* Set PHY Config Counter to 50msec */
   15945 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15946 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15947 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15948 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15949 
   15950 	/* Toggle LANPHYPC */
   15951 	reg = CSR_READ(sc, WMREG_CTRL);
   15952 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15953 	reg &= ~CTRL_LANPHYPC_VALUE;
   15954 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15955 	CSR_WRITE_FLUSH(sc);
   15956 	delay(1000);
   15957 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15958 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15959 	CSR_WRITE_FLUSH(sc);
   15960 
   15961 	if (sc->sc_type < WM_T_PCH_LPT)
   15962 		delay(50 * 1000);
   15963 	else {
   15964 		i = 20;
   15965 
   15966 		do {
   15967 			delay(5 * 1000);
   15968 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15969 		    && i--);
   15970 
   15971 		delay(30 * 1000);
   15972 	}
   15973 }
   15974 
   15975 static int
   15976 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   15977 {
   15978 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   15979 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   15980 	uint32_t rxa;
   15981 	uint16_t scale = 0, lat_enc = 0;
   15982 	int32_t obff_hwm = 0;
   15983 	int64_t lat_ns, value;
   15984 
   15985 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15986 		device_xname(sc->sc_dev), __func__));
   15987 
   15988 	if (link) {
   15989 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   15990 		uint32_t status;
   15991 		uint16_t speed;
   15992 		pcireg_t preg;
   15993 
   15994 		status = CSR_READ(sc, WMREG_STATUS);
   15995 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   15996 		case STATUS_SPEED_10:
   15997 			speed = 10;
   15998 			break;
   15999 		case STATUS_SPEED_100:
   16000 			speed = 100;
   16001 			break;
   16002 		case STATUS_SPEED_1000:
   16003 			speed = 1000;
   16004 			break;
   16005 		default:
   16006 			device_printf(sc->sc_dev, "Unknown speed "
   16007 			    "(status = %08x)\n", status);
   16008 			return -1;
   16009 		}
   16010 
   16011 		/* Rx Packet Buffer Allocation size (KB) */
   16012 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16013 
   16014 		/*
   16015 		 * Determine the maximum latency tolerated by the device.
   16016 		 *
   16017 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16018 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16019 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16020 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16021 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16022 		 */
   16023 		lat_ns = ((int64_t)rxa * 1024 -
   16024 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16025 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16026 		if (lat_ns < 0)
   16027 			lat_ns = 0;
   16028 		else
   16029 			lat_ns /= speed;
   16030 		value = lat_ns;
   16031 
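          		/*
          		 * Worked example (hypothetical numbers): 500,000 ns does
          		 * not fit in the 10-bit value field (max 1023), so the
          		 * loop below divides by 2^5 per scale step, rounding up:
          		 * 500000 -> 15625 (scale 1) -> 489 (scale 2).  The
          		 * encoding scale=2/value=489 then represents 489 * 2^10
          		 * ns, i.e. about 501 usec.
          		 */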
   16032 		while (value > LTRV_VALUE) {
    16033 			scale++;
   16034 			value = howmany(value, __BIT(5));
   16035 		}
   16036 		if (scale > LTRV_SCALE_MAX) {
   16037 			printf("%s: Invalid LTR latency scale %d\n",
   16038 			    device_xname(sc->sc_dev), scale);
   16039 			return -1;
   16040 		}
   16041 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16042 
   16043 		/* Determine the maximum latency tolerated by the platform */
   16044 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16045 		    WM_PCI_LTR_CAP_LPT);
   16046 		max_snoop = preg & 0xffff;
   16047 		max_nosnoop = preg >> 16;
   16048 
   16049 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16050 
   16051 		if (lat_enc > max_ltr_enc) {
   16052 			lat_enc = max_ltr_enc;
   16053 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16054 			    * PCI_LTR_SCALETONS(
   16055 				    __SHIFTOUT(lat_enc,
   16056 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16057 		}
   16058 
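          		/*
          		 * Convert the tolerated latency back into the amount of
          		 * Rx buffer (in KB, approximating 1KB = 1000 bytes) that
          		 * fills during that time at the current link speed; the
          		 * OBFF high water mark is whatever remains of the Rx
          		 * allocation.
          		 */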
   16059 		if (lat_ns) {
   16060 			lat_ns *= speed * 1000;
   16061 			lat_ns /= 8;
   16062 			lat_ns /= 1000000000;
   16063 			obff_hwm = (int32_t)(rxa - lat_ns);
   16064 		}
   16065 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    16066 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    16067 			    " (rxa = %d, lat_ns = %d)\n",
   16068 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16069 			return -1;
   16070 		}
   16071 	}
    16072 	/* Snoop and No-Snoop latencies are set to the same value */
   16073 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16074 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16075 
   16076 	/* Set OBFF high water mark */
   16077 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16078 	reg |= obff_hwm;
   16079 	CSR_WRITE(sc, WMREG_SVT, reg);
   16080 
   16081 	/* Enable OBFF */
   16082 	reg = CSR_READ(sc, WMREG_SVCR);
   16083 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16084 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16085 
   16086 	return 0;
   16087 }
   16088 
   16089 /*
   16090  * I210 Errata 25 and I211 Errata 10
   16091  * Slow System Clock.
   16092  */
   16093 static int
   16094 wm_pll_workaround_i210(struct wm_softc *sc)
   16095 {
   16096 	uint32_t mdicnfg, wuc;
   16097 	uint32_t reg;
   16098 	pcireg_t pcireg;
   16099 	uint32_t pmreg;
   16100 	uint16_t nvmword, tmp_nvmword;
   16101 	uint16_t phyval;
   16102 	bool wa_done = false;
   16103 	int i, rv = 0;
   16104 
   16105 	/* Get Power Management cap offset */
   16106 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16107 	    &pmreg, NULL) == 0)
   16108 		return -1;
   16109 
   16110 	/* Save WUC and MDICNFG registers */
   16111 	wuc = CSR_READ(sc, WMREG_WUC);
   16112 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16113 
   16114 	reg = mdicnfg & ~MDICNFG_DEST;
   16115 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16116 
   16117 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16118 		nvmword = INVM_DEFAULT_AL;
   16119 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16120 
   16121 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16122 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16123 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16124 
   16125 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16126 			rv = 0;
   16127 			break; /* OK */
   16128 		} else
   16129 			rv = -1;
   16130 
   16131 		wa_done = true;
   16132 		/* Directly reset the internal PHY */
   16133 		reg = CSR_READ(sc, WMREG_CTRL);
   16134 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16135 
   16136 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16137 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16138 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16139 
   16140 		CSR_WRITE(sc, WMREG_WUC, 0);
   16141 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16142 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16143 
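          		/*
          		 * Bounce the device through D3hot and back to D0 so the
          		 * NVM auto-load runs again with the workaround value
          		 * programmed above; the original autoload word is written
          		 * back once the device is back in D0.
          		 */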
   16144 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16145 		    pmreg + PCI_PMCSR);
   16146 		pcireg |= PCI_PMCSR_STATE_D3;
   16147 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16148 		    pmreg + PCI_PMCSR, pcireg);
   16149 		delay(1000);
   16150 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16151 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16152 		    pmreg + PCI_PMCSR, pcireg);
   16153 
   16154 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16155 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16156 
   16157 		/* Restore WUC register */
   16158 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16159 	}
   16160 
   16161 	/* Restore MDICNFG setting */
   16162 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16163 	if (wa_done)
   16164 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16165 	return rv;
   16166 }
   16167 
   16168 static void
   16169 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16170 {
   16171 	uint32_t reg;
   16172 
   16173 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16174 		device_xname(sc->sc_dev), __func__));
   16175 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16176 	    || (sc->sc_type == WM_T_PCH_CNP));
   16177 
   16178 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16179 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16180 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16181 
   16182 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16183 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16184 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16185 }
   16186