      1 /*	$NetBSD: if_wm.c,v 1.629 2019/02/28 16:56:35 khorben Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.629 2019/02/28 16:56:35 khorben Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 
    109 #include <sys/rndsource.h>
    110 
    111 #include <net/if.h>
    112 #include <net/if_dl.h>
    113 #include <net/if_media.h>
    114 #include <net/if_ether.h>
    115 
    116 #include <net/bpf.h>
    117 
    118 #include <net/rss_config.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/mdio.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
    160 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    161 #else
    162 #define	DPRINTF(x, y)	__nothing
    163 #endif /* WM_DEBUG */
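         /*
          * Illustrative use: DPRINTF()'s second argument is a fully
          * parenthesized printf() argument list, e.g.
          *
          *	DPRINTF(WM_DEBUG_LINK,
          *	    ("%s: link up\n", device_xname(sc->sc_dev)));
          */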
    164 
    165 #ifdef NET_MPSAFE
    166 #define WM_MPSAFE	1
    167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    168 #else
    169 #define CALLOUT_FLAGS	0
    170 #endif
    171 
    172 /*
     173  * The maximum number of interrupts used by this driver.
    174  */
    175 #define WM_MAX_NQUEUEINTR	16
    176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    177 
    178 #ifndef WM_DISABLE_MSI
    179 #define	WM_DISABLE_MSI 0
    180 #endif
    181 #ifndef WM_DISABLE_MSIX
    182 #define	WM_DISABLE_MSIX 0
    183 #endif
    184 
    185 int wm_disable_msi = WM_DISABLE_MSI;
    186 int wm_disable_msix = WM_DISABLE_MSIX;
    187 
    188 #ifndef WM_WATCHDOG_TIMEOUT
    189 #define WM_WATCHDOG_TIMEOUT 5
    190 #endif
    191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    192 
    193 /*
    194  * Transmit descriptor list size.  Due to errata, we can only have
    195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    196  * on >= 82544. We tell the upper layers that they can queue a lot
    197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    198  * of them at a time.
    199  *
    200  * We allow up to 64 DMA segments per packet.  Pathological packet
    201  * chains containing many small mbufs have been observed in zero-copy
     202  * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
     203  * segments, m_defrag() is called to reduce it.
    204  */
    205 #define	WM_NTXSEGS		64
    206 #define	WM_IFQUEUELEN		256
    207 #define	WM_TXQUEUELEN_MAX	64
    208 #define	WM_TXQUEUELEN_MAX_82547	16
    209 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    210 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    211 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    212 #define	WM_NTXDESC_82542	256
    213 #define	WM_NTXDESC_82544	4096
    214 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    215 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    216 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    217 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    218 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
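         /*
          * Illustrative note: WM_NTXDESC() and WM_TXQUEUELEN() are powers of
          * two, so ring indices wrap with a mask instead of a modulo; e.g.
          * with WM_NTXDESC(txq) == 4096,
          * WM_NEXTTX(txq, 4095) == (4096 & 4095) == 0.
          */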
    219 
    220 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    221 
    222 #define	WM_TXINTERQSIZE		256
    223 
    224 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    225 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    226 #endif
    227 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    228 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    229 #endif
    230 
    231 /*
     232  * Receive descriptor list size.  We have one Rx buffer for normal-
     233  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    234  * packet.  We allocate 256 receive descriptors, each with a 2k
    235  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    236  */
    237 #define	WM_NRXDESC		256
    238 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    239 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    240 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
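         /*
          * Illustrative note: the same power-of-two mask handles wrap in both
          * directions, e.g. WM_NEXTRX(255) == 0 and WM_PREVRX(0) == 255
          * ((0 - 1) & 255 == 255 in two's-complement arithmetic).
          */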
    241 
    242 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    243 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    244 #endif
    245 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    246 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    247 #endif
    248 
    249 typedef union txdescs {
    250 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    251 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    252 } txdescs_t;
    253 
    254 typedef union rxdescs {
    255 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    256 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    257 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    258 } rxdescs_t;
    259 
    260 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    261 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
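         /*
          * Illustrative arithmetic: these are byte offsets into the
          * DMA-mapped control area, so with a (hypothetical) 16-byte
          * descriptor size, WM_CDTXOFF(txq, 3) == 48.
          */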
    262 
    263 /*
    264  * Software state for transmit jobs.
    265  */
    266 struct wm_txsoft {
    267 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    268 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    269 	int txs_firstdesc;		/* first descriptor in packet */
    270 	int txs_lastdesc;		/* last descriptor in packet */
    271 	int txs_ndesc;			/* # of descriptors used */
    272 };
    273 
    274 /*
    275  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    276  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    277  * them together.
    278  */
    279 struct wm_rxsoft {
    280 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    281 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    282 };
    283 
    284 #define WM_LINKUP_TIMEOUT	50
    285 
    286 static uint16_t swfwphysem[] = {
    287 	SWFW_PHY0_SM,
    288 	SWFW_PHY1_SM,
    289 	SWFW_PHY2_SM,
    290 	SWFW_PHY3_SM
    291 };
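         /*
          * Illustrative use: one SWFW semaphore bit per PHY, so a MAC
          * function would pick its semaphore as swfwphysem[sc->sc_funcid]
          * (sc_funcid is the 0..3 function number kept in struct wm_softc
          * below).
          */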
    292 
    293 static const uint32_t wm_82580_rxpbs_table[] = {
    294 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    295 };
    296 
    297 struct wm_softc;
    298 
    299 #ifdef WM_EVENT_COUNTERS
    300 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    301 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    302 	struct evcnt qname##_ev_##evname;
    303 
    304 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    305 	do {								\
    306 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    307 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    308 		    "%s%02d%s", #qname, (qnum), #evname);		\
    309 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    310 		    (evtype), NULL, (xname),				\
    311 		    (q)->qname##_##evname##_evcnt_name);		\
    312 	} while (0)
    313 
    314 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    315 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    316 
    317 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    318 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    319 
    320 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    321 	evcnt_detach(&(q)->qname##_ev_##evname);
    322 #endif /* WM_EVENT_COUNTERS */
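         /*
          * Illustrative expansion: WM_Q_EVCNT_DEFINE(txq, txdw) declares
          * txq_txdw_evcnt_name[] and a struct evcnt txq_ev_txdw, and
          * WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname) formats the
          * counter name "txq00txdw" and registers it via
          * evcnt_attach_dynamic().
          */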
    323 
    324 struct wm_txqueue {
    325 	kmutex_t *txq_lock;		/* lock for tx operations */
    326 
    327 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    328 
    329 	/* Software state for the transmit descriptors. */
    330 	int txq_num;			/* must be a power of two */
    331 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    332 
    333 	/* TX control data structures. */
    334 	int txq_ndesc;			/* must be a power of two */
     335 	size_t txq_descsize;		/* size of a Tx descriptor */
    336 	txdescs_t *txq_descs_u;
    337 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    338 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     339 	int txq_desc_rseg;		/* real number of control segments */
    340 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    341 #define	txq_descs	txq_descs_u->sctxu_txdescs
    342 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    343 
    344 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    345 
    346 	int txq_free;			/* number of free Tx descriptors */
    347 	int txq_next;			/* next ready Tx descriptor */
    348 
    349 	int txq_sfree;			/* number of free Tx jobs */
    350 	int txq_snext;			/* next free Tx job */
    351 	int txq_sdirty;			/* dirty Tx jobs */
    352 
    353 	/* These 4 variables are used only on the 82547. */
    354 	int txq_fifo_size;		/* Tx FIFO size */
    355 	int txq_fifo_head;		/* current head of FIFO */
    356 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    357 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    358 
    359 	/*
    360 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     361 	 * CPUs.  This queue mediates between them without blocking.
    362 	 */
    363 	pcq_t *txq_interq;
    364 
    365 	/*
     366 	 * NEWQUEUE devices must use txq->txq_flags (not ifp->if_flags)
     367 	 * to manage the Tx H/W queue's busy flag.
    368 	 */
    369 	int txq_flags;			/* flags for H/W queue, see below */
    370 #define	WM_TXQ_NO_SPACE	0x1
    371 
    372 	bool txq_stopping;
    373 
    374 	bool txq_sending;
    375 	time_t txq_lastsent;
    376 
    377 	uint32_t txq_packets;		/* for AIM */
    378 	uint32_t txq_bytes;		/* for AIM */
    379 #ifdef WM_EVENT_COUNTERS
    380 	/* TX event counters */
    381 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    382 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    383 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    384 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    385 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    386 					    /* XXX not used? */
    387 
    388 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    389 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    390 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    391 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    392 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    393 	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
    394 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    395 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    396 					    /* other than toomanyseg */
    397 
     398 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
    399 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    400 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    401 
    402 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    403 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    404 #endif /* WM_EVENT_COUNTERS */
    405 };
    406 
    407 struct wm_rxqueue {
    408 	kmutex_t *rxq_lock;		/* lock for rx operations */
    409 
    410 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    411 
    412 	/* Software state for the receive descriptors. */
    413 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    414 
    415 	/* RX control data structures. */
    416 	int rxq_ndesc;			/* must be a power of two */
     417 	size_t rxq_descsize;		/* size of an Rx descriptor */
    418 	rxdescs_t *rxq_descs_u;
    419 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    420 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     421 	int rxq_desc_rseg;		/* real number of control segments */
    422 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    423 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    424 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    425 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    426 
    427 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    428 
    429 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    430 	int rxq_discard;
    431 	int rxq_len;
    432 	struct mbuf *rxq_head;
    433 	struct mbuf *rxq_tail;
    434 	struct mbuf **rxq_tailp;
    435 
    436 	bool rxq_stopping;
    437 
    438 	uint32_t rxq_packets;		/* for AIM */
    439 	uint32_t rxq_bytes;		/* for AIM */
    440 #ifdef WM_EVENT_COUNTERS
    441 	/* RX event counters */
    442 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    443 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    444 
    445 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    446 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    447 #endif
    448 };
    449 
    450 struct wm_queue {
    451 	int wmq_id;			/* index of TX/RX queues */
    452 	int wmq_intr_idx;		/* index of MSI-X tables */
    453 
    454 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    455 	bool wmq_set_itr;
    456 
    457 	struct wm_txqueue wmq_txq;
    458 	struct wm_rxqueue wmq_rxq;
    459 
    460 	void *wmq_si;
    461 };
    462 
    463 struct wm_phyop {
    464 	int (*acquire)(struct wm_softc *);
    465 	void (*release)(struct wm_softc *);
    466 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    467 	int (*writereg_locked)(device_t, int, int, uint16_t);
    468 	int reset_delay_us;
    469 };
    470 
    471 struct wm_nvmop {
    472 	int (*acquire)(struct wm_softc *);
    473 	void (*release)(struct wm_softc *);
    474 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    475 };
    476 
    477 /*
    478  * Software state per device.
    479  */
    480 struct wm_softc {
    481 	device_t sc_dev;		/* generic device information */
    482 	bus_space_tag_t sc_st;		/* bus space tag */
    483 	bus_space_handle_t sc_sh;	/* bus space handle */
    484 	bus_size_t sc_ss;		/* bus space size */
    485 	bus_space_tag_t sc_iot;		/* I/O space tag */
    486 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    487 	bus_size_t sc_ios;		/* I/O space size */
    488 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    489 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    490 	bus_size_t sc_flashs;		/* flash registers space size */
    491 	off_t sc_flashreg_offset;	/*
    492 					 * offset to flash registers from
    493 					 * start of BAR
    494 					 */
    495 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    496 
    497 	struct ethercom sc_ethercom;	/* ethernet common data */
    498 	struct mii_data sc_mii;		/* MII/media information */
    499 
    500 	pci_chipset_tag_t sc_pc;
    501 	pcitag_t sc_pcitag;
    502 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    503 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    504 
    505 	uint16_t sc_pcidevid;		/* PCI device ID */
    506 	wm_chip_type sc_type;		/* MAC type */
    507 	int sc_rev;			/* MAC revision */
    508 	wm_phy_type sc_phytype;		/* PHY type */
    509 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    510 #define	WM_MEDIATYPE_UNKNOWN		0x00
    511 #define	WM_MEDIATYPE_FIBER		0x01
    512 #define	WM_MEDIATYPE_COPPER		0x02
    513 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    514 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    515 	int sc_flags;			/* flags; see below */
    516 	int sc_if_flags;		/* last if_flags */
    517 	int sc_ec_capenable;		/* last ec_capenable */
    518 	int sc_flowflags;		/* 802.3x flow control flags */
    519 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    520 	int sc_align_tweak;
    521 
    522 	void *sc_ihs[WM_MAX_NINTR];	/*
    523 					 * interrupt cookie.
    524 					 * - legacy and msi use sc_ihs[0] only
    525 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    526 					 */
    527 	pci_intr_handle_t *sc_intrs;	/*
    528 					 * legacy and msi use sc_intrs[0] only
     529 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    530 					 */
    531 	int sc_nintrs;			/* number of interrupts */
    532 
    533 	int sc_link_intr_idx;		/* index of MSI-X tables */
    534 
    535 	callout_t sc_tick_ch;		/* tick callout */
    536 	bool sc_core_stopping;
    537 
    538 	int sc_nvm_ver_major;
    539 	int sc_nvm_ver_minor;
    540 	int sc_nvm_ver_build;
    541 	int sc_nvm_addrbits;		/* NVM address bits */
    542 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    543 	int sc_ich8_flash_base;
    544 	int sc_ich8_flash_bank_size;
    545 	int sc_nvm_k1_enabled;
    546 
    547 	int sc_nqueues;
    548 	struct wm_queue *sc_queue;
    549 	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
    550 	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
    551 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    552 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    553 
    554 	int sc_affinity_offset;
    555 
    556 #ifdef WM_EVENT_COUNTERS
    557 	/* Event counters. */
    558 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    559 
    560 	/* WM_T_82542_2_1 only */
    561 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    562 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    563 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    564 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    565 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    566 #endif /* WM_EVENT_COUNTERS */
    567 
     568 	/* This variable is used only on the 82547. */
    569 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    570 
    571 	uint32_t sc_ctrl;		/* prototype CTRL register */
    572 #if 0
    573 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    574 #endif
    575 	uint32_t sc_icr;		/* prototype interrupt bits */
    576 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    577 	uint32_t sc_tctl;		/* prototype TCTL register */
    578 	uint32_t sc_rctl;		/* prototype RCTL register */
    579 	uint32_t sc_txcw;		/* prototype TXCW register */
    580 	uint32_t sc_tipg;		/* prototype TIPG register */
    581 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    582 	uint32_t sc_pba;		/* prototype PBA register */
    583 
    584 	int sc_tbi_linkup;		/* TBI link status */
    585 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    586 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    587 
    588 	int sc_mchash_type;		/* multicast filter offset */
    589 
    590 	krndsource_t rnd_source;	/* random source */
    591 
    592 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    593 
    594 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    595 	kmutex_t *sc_ich_phymtx;	/*
    596 					 * 82574/82583/ICH/PCH specific PHY
    597 					 * mutex. For 82574/82583, the mutex
    598 					 * is used for both PHY and NVM.
    599 					 */
    600 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    601 
    602 	struct wm_phyop phy;
    603 	struct wm_nvmop nvm;
    604 };
    605 
    606 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    607 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    608 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
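         /*
          * Note: sc_core_lock may be NULL (the NULL checks above imply a
          * configuration that runs without a core lock), in which case
          * WM_CORE_LOCK()/WM_CORE_UNLOCK() are no-ops and WM_CORE_LOCKED()
          * is always true.  An illustrative caller:
          *
          *	WM_CORE_LOCK(sc);
          *	... modify softc state ...
          *	WM_CORE_UNLOCK(sc);
          */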
    609 
    610 #define	WM_RXCHAIN_RESET(rxq)						\
    611 do {									\
    612 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    613 	*(rxq)->rxq_tailp = NULL;					\
    614 	(rxq)->rxq_len = 0;						\
    615 } while (/*CONSTCOND*/0)
    616 
    617 #define	WM_RXCHAIN_LINK(rxq, m)						\
    618 do {									\
    619 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    620 	(rxq)->rxq_tailp = &(m)->m_next;				\
    621 } while (/*CONSTCOND*/0)
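         /*
          * Illustrative walk-through of the tail-pointer idiom above: after
          * WM_RXCHAIN_RESET(rxq), rxq_tailp points at rxq_head.  Linking m1
          * and then m2 leaves rxq_head == m1, m1->m_next == m2 and
          * rxq_tailp == &m2->m_next, so every append is O(1) with no list
          * traversal.
          */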
    622 
    623 #ifdef WM_EVENT_COUNTERS
    624 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    625 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    626 
    627 #define WM_Q_EVCNT_INCR(qname, evname)			\
    628 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    629 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    630 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    631 #else /* !WM_EVENT_COUNTERS */
    632 #define	WM_EVCNT_INCR(ev)	/* nothing */
    633 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    634 
    635 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    636 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    637 #endif /* !WM_EVENT_COUNTERS */
    638 
    639 #define	CSR_READ(sc, reg)						\
    640 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    641 #define	CSR_WRITE(sc, reg, val)						\
    642 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    643 #define	CSR_WRITE_FLUSH(sc)						\
    644 	(void) CSR_READ((sc), WMREG_STATUS)
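         /*
          * Illustrative posted-write flush idiom (a sketch, not code copied
          * from this driver): the STATUS read forces preceding posted
          * writes out to the chip before any dependent access.
          *
          *	CSR_WRITE(sc, reg, val);
          *	CSR_WRITE_FLUSH(sc);
          */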
    645 
    646 #define ICH8_FLASH_READ32(sc, reg)					\
    647 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    648 	    (reg) + sc->sc_flashreg_offset)
    649 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    650 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    651 	    (reg) + sc->sc_flashreg_offset, (data))
    652 
    653 #define ICH8_FLASH_READ16(sc, reg)					\
    654 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    655 	    (reg) + sc->sc_flashreg_offset)
    656 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    657 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    658 	    (reg) + sc->sc_flashreg_offset, (data))
    659 
    660 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    661 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    662 
    663 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    664 #define	WM_CDTXADDR_HI(txq, x)						\
    665 	(sizeof(bus_addr_t) == 8 ?					\
    666 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    667 
    668 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    669 #define	WM_CDRXADDR_HI(rxq, x)						\
    670 	(sizeof(bus_addr_t) == 8 ?					\
    671 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
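         /*
          * Illustrative arithmetic: for a (hypothetical) descriptor address
          * 0x123456000, WM_CDTXADDR_LO() yields 0x23456000 and
          * WM_CDTXADDR_HI() yields 0x1, i.e. the low and high 32-bit halves
          * programmed into the descriptor base register pair.  When
          * bus_addr_t is 32 bits the high half is always 0.
          */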
    672 
    673 /*
    674  * Register read/write functions.
    675  * Other than CSR_{READ|WRITE}().
    676  */
    677 #if 0
    678 static inline uint32_t wm_io_read(struct wm_softc *, int);
    679 #endif
    680 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    681 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    682     uint32_t, uint32_t);
    683 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    684 
    685 /*
    686  * Descriptor sync/init functions.
    687  */
    688 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    689 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    690 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    691 
    692 /*
    693  * Device driver interface functions and commonly used functions.
    694  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    695  */
    696 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    697 static int	wm_match(device_t, cfdata_t, void *);
    698 static void	wm_attach(device_t, device_t, void *);
    699 static int	wm_detach(device_t, int);
    700 static bool	wm_suspend(device_t, const pmf_qual_t *);
    701 static bool	wm_resume(device_t, const pmf_qual_t *);
    702 static void	wm_watchdog(struct ifnet *);
    703 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    704     uint16_t *);
    705 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    706     uint16_t *);
    707 static void	wm_tick(void *);
    708 static int	wm_ifflags_cb(struct ethercom *);
    709 static int	wm_ioctl(struct ifnet *, u_long, void *);
    710 /* MAC address related */
    711 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    712 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    713 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    714 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    715 static int	wm_rar_count(struct wm_softc *);
    716 static void	wm_set_filter(struct wm_softc *);
    717 /* Reset and init related */
    718 static void	wm_set_vlan(struct wm_softc *);
    719 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    720 static void	wm_get_auto_rd_done(struct wm_softc *);
    721 static void	wm_lan_init_done(struct wm_softc *);
    722 static void	wm_get_cfg_done(struct wm_softc *);
    723 static int	wm_phy_post_reset(struct wm_softc *);
    724 static int	wm_write_smbus_addr(struct wm_softc *);
    725 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    726 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    727 static void	wm_initialize_hardware_bits(struct wm_softc *);
    728 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    729 static int	wm_reset_phy(struct wm_softc *);
    730 static void	wm_flush_desc_rings(struct wm_softc *);
    731 static void	wm_reset(struct wm_softc *);
    732 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    733 static void	wm_rxdrain(struct wm_rxqueue *);
    734 static void	wm_init_rss(struct wm_softc *);
    735 static void	wm_adjust_qnum(struct wm_softc *, int);
    736 static inline bool	wm_is_using_msix(struct wm_softc *);
    737 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    738 static int	wm_softint_establish(struct wm_softc *, int, int);
    739 static int	wm_setup_legacy(struct wm_softc *);
    740 static int	wm_setup_msix(struct wm_softc *);
    741 static int	wm_init(struct ifnet *);
    742 static int	wm_init_locked(struct ifnet *);
    743 static void	wm_unset_stopping_flags(struct wm_softc *);
    744 static void	wm_set_stopping_flags(struct wm_softc *);
    745 static void	wm_stop(struct ifnet *, int);
    746 static void	wm_stop_locked(struct ifnet *, int);
    747 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    748 static void	wm_82547_txfifo_stall(void *);
    749 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    750 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    751 /* DMA related */
    752 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    753 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    754 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    755 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    756     struct wm_txqueue *);
    757 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    758 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    759 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    760     struct wm_rxqueue *);
    761 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    762 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    763 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    764 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    765 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    766 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    767 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    768     struct wm_txqueue *);
    769 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    770     struct wm_rxqueue *);
    771 static int	wm_alloc_txrx_queues(struct wm_softc *);
    772 static void	wm_free_txrx_queues(struct wm_softc *);
    773 static int	wm_init_txrx_queues(struct wm_softc *);
    774 /* Start */
    775 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    776     struct wm_txsoft *, uint32_t *, uint8_t *);
    777 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    778 static void	wm_start(struct ifnet *);
    779 static void	wm_start_locked(struct ifnet *);
    780 static int	wm_transmit(struct ifnet *, struct mbuf *);
    781 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    782 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    783     bool);
    784 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    785     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    786 static void	wm_nq_start(struct ifnet *);
    787 static void	wm_nq_start_locked(struct ifnet *);
    788 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    789 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    790 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    791     bool);
    792 static void	wm_deferred_start_locked(struct wm_txqueue *);
    793 static void	wm_handle_queue(void *);
    794 /* Interrupt */
    795 static bool	wm_txeof(struct wm_txqueue *, u_int);
    796 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    797 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    798 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    799 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    800 static void	wm_linkintr(struct wm_softc *, uint32_t);
    801 static int	wm_intr_legacy(void *);
    802 static inline void	wm_txrxintr_disable(struct wm_queue *);
    803 static inline void	wm_txrxintr_enable(struct wm_queue *);
    804 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    805 static int	wm_txrxintr_msix(void *);
    806 static int	wm_linkintr_msix(void *);
    807 
    808 /*
    809  * Media related.
    810  * GMII, SGMII, TBI, SERDES and SFP.
    811  */
    812 /* Common */
    813 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    814 /* GMII related */
    815 static void	wm_gmii_reset(struct wm_softc *);
    816 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    817 static int	wm_get_phy_id_82575(struct wm_softc *);
    818 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    819 static int	wm_gmii_mediachange(struct ifnet *);
    820 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    821 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    822 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    823 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    824 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    825 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    826 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    827 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    828 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    829 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    830 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    831 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    832 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    833 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    834 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    835 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    836 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    837 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    838 	bool);
    839 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    840 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    841 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    842 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    843 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    844 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    845 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    846 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    847 static void	wm_gmii_statchg(struct ifnet *);
    848 /*
    849  * kumeran related (80003, ICH* and PCH*).
    850  * These functions are not for accessing MII registers but for accessing
    851  * kumeran specific registers.
    852  */
    853 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    854 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    855 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    856 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    857 /* EMI register related */
    858 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    859 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    860 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    861 /* SGMII */
    862 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    863 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    864 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    865 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    866 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    867 /* TBI related */
    868 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    869 static void	wm_tbi_mediainit(struct wm_softc *);
    870 static int	wm_tbi_mediachange(struct ifnet *);
    871 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    872 static int	wm_check_for_link(struct wm_softc *);
    873 static void	wm_tbi_tick(struct wm_softc *);
    874 /* SERDES related */
    875 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    876 static int	wm_serdes_mediachange(struct ifnet *);
    877 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    878 static void	wm_serdes_tick(struct wm_softc *);
    879 /* SFP related */
    880 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    881 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    882 
    883 /*
    884  * NVM related.
     885  * Microwire, SPI (with or without EERD) and Flash.
    886  */
    887 /* Misc functions */
    888 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    889 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    890 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    891 /* Microwire */
    892 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    893 /* SPI */
    894 static int	wm_nvm_ready_spi(struct wm_softc *);
    895 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    896 /* Using with EERD */
    897 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    898 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    899 /* Flash */
    900 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    901     unsigned int *);
    902 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    903 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    904 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    905     uint32_t *);
    906 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    907 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    908 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    909 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    910 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    911 /* iNVM */
    912 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    913 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    914 /* Lock, detecting NVM type, validate checksum and read */
    915 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    916 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    917 static int	wm_nvm_validate_checksum(struct wm_softc *);
    918 static void	wm_nvm_version_invm(struct wm_softc *);
    919 static void	wm_nvm_version(struct wm_softc *);
    920 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    921 
    922 /*
    923  * Hardware semaphores.
     924  * Very complex...
    925  */
    926 static int	wm_get_null(struct wm_softc *);
    927 static void	wm_put_null(struct wm_softc *);
    928 static int	wm_get_eecd(struct wm_softc *);
    929 static void	wm_put_eecd(struct wm_softc *);
    930 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    931 static void	wm_put_swsm_semaphore(struct wm_softc *);
    932 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    933 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    934 static int	wm_get_nvm_80003(struct wm_softc *);
    935 static void	wm_put_nvm_80003(struct wm_softc *);
    936 static int	wm_get_nvm_82571(struct wm_softc *);
    937 static void	wm_put_nvm_82571(struct wm_softc *);
    938 static int	wm_get_phy_82575(struct wm_softc *);
    939 static void	wm_put_phy_82575(struct wm_softc *);
    940 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    941 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    942 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    943 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    944 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    945 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    946 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    947 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    948 
    949 /*
    950  * Management mode and power management related subroutines.
    951  * BMC, AMT, suspend/resume and EEE.
    952  */
    953 #if 0
    954 static int	wm_check_mng_mode(struct wm_softc *);
    955 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    956 static int	wm_check_mng_mode_82574(struct wm_softc *);
    957 static int	wm_check_mng_mode_generic(struct wm_softc *);
    958 #endif
    959 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    960 static bool	wm_phy_resetisblocked(struct wm_softc *);
    961 static void	wm_get_hw_control(struct wm_softc *);
    962 static void	wm_release_hw_control(struct wm_softc *);
    963 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    964 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
    965 static void	wm_init_manageability(struct wm_softc *);
    966 static void	wm_release_manageability(struct wm_softc *);
    967 static void	wm_get_wakeup(struct wm_softc *);
    968 static int	wm_ulp_disable(struct wm_softc *);
    969 static int	wm_enable_phy_wakeup(struct wm_softc *);
    970 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    971 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
    972 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
    973 static void	wm_enable_wakeup(struct wm_softc *);
    974 static void	wm_disable_aspm(struct wm_softc *);
    975 /* LPLU (Low Power Link Up) */
    976 static void	wm_lplu_d0_disable(struct wm_softc *);
    977 /* EEE */
    978 static int	wm_set_eee_i350(struct wm_softc *);
    979 static int	wm_set_eee_pchlan(struct wm_softc *);
    980 static int	wm_set_eee(struct wm_softc *);
    981 
    982 /*
    983  * Workarounds (mainly PHY related).
    984  * Basically, PHY's workarounds are in the PHY drivers.
    985  */
    986 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    987 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    988 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
    989 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
    990 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
    991 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
    992 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    993 static int	wm_k1_workaround_lv(struct wm_softc *);
    994 static int	wm_link_stall_workaround_hv(struct wm_softc *);
    995 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    996 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    997 static void	wm_reset_init_script_82575(struct wm_softc *);
    998 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    999 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1000 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1001 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1002 static int	wm_pll_workaround_i210(struct wm_softc *);
   1003 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1004 
   1005 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1006     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1007 
   1008 /*
   1009  * Devices supported by this driver.
   1010  */
   1011 static const struct wm_product {
   1012 	pci_vendor_id_t		wmp_vendor;
   1013 	pci_product_id_t	wmp_product;
   1014 	const char		*wmp_name;
   1015 	wm_chip_type		wmp_type;
   1016 	uint32_t		wmp_flags;
   1017 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1018 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1019 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1020 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1021 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1022 } wm_products[] = {
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1024 	  "Intel i82542 1000BASE-X Ethernet",
   1025 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1028 	  "Intel i82543GC 1000BASE-X Ethernet",
   1029 	  WM_T_82543,		WMP_F_FIBER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1032 	  "Intel i82543GC 1000BASE-T Ethernet",
   1033 	  WM_T_82543,		WMP_F_COPPER },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1036 	  "Intel i82544EI 1000BASE-T Ethernet",
   1037 	  WM_T_82544,		WMP_F_COPPER },
   1038 
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1040 	  "Intel i82544EI 1000BASE-X Ethernet",
   1041 	  WM_T_82544,		WMP_F_FIBER },
   1042 
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1044 	  "Intel i82544GC 1000BASE-T Ethernet",
   1045 	  WM_T_82544,		WMP_F_COPPER },
   1046 
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1048 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1049 	  WM_T_82544,		WMP_F_COPPER },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1052 	  "Intel i82540EM 1000BASE-T Ethernet",
   1053 	  WM_T_82540,		WMP_F_COPPER },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1056 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1057 	  WM_T_82540,		WMP_F_COPPER },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1060 	  "Intel i82540EP 1000BASE-T Ethernet",
   1061 	  WM_T_82540,		WMP_F_COPPER },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1064 	  "Intel i82540EP 1000BASE-T Ethernet",
   1065 	  WM_T_82540,		WMP_F_COPPER },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1068 	  "Intel i82540EP 1000BASE-T Ethernet",
   1069 	  WM_T_82540,		WMP_F_COPPER },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1072 	  "Intel i82545EM 1000BASE-T Ethernet",
   1073 	  WM_T_82545,		WMP_F_COPPER },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1076 	  "Intel i82545GM 1000BASE-T Ethernet",
   1077 	  WM_T_82545_3,		WMP_F_COPPER },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1080 	  "Intel i82545GM 1000BASE-X Ethernet",
   1081 	  WM_T_82545_3,		WMP_F_FIBER },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1084 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1085 	  WM_T_82545_3,		WMP_F_SERDES },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1088 	  "Intel i82546EB 1000BASE-T Ethernet",
   1089 	  WM_T_82546,		WMP_F_COPPER },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1092 	  "Intel i82546EB 1000BASE-T Ethernet",
   1093 	  WM_T_82546,		WMP_F_COPPER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1096 	  "Intel i82545EM 1000BASE-X Ethernet",
   1097 	  WM_T_82545,		WMP_F_FIBER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1100 	  "Intel i82546EB 1000BASE-X Ethernet",
   1101 	  WM_T_82546,		WMP_F_FIBER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1104 	  "Intel i82546GB 1000BASE-T Ethernet",
   1105 	  WM_T_82546_3,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1108 	  "Intel i82546GB 1000BASE-X Ethernet",
   1109 	  WM_T_82546_3,		WMP_F_FIBER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1112 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1113 	  WM_T_82546_3,		WMP_F_SERDES },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1116 	  "i82546GB quad-port Gigabit Ethernet",
   1117 	  WM_T_82546_3,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1120 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1121 	  WM_T_82546_3,		WMP_F_COPPER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1124 	  "Intel PRO/1000MT (82546GB)",
   1125 	  WM_T_82546_3,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1128 	  "Intel i82541EI 1000BASE-T Ethernet",
   1129 	  WM_T_82541,		WMP_F_COPPER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1132 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1133 	  WM_T_82541,		WMP_F_COPPER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1136 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1137 	  WM_T_82541,		WMP_F_COPPER },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1140 	  "Intel i82541ER 1000BASE-T Ethernet",
   1141 	  WM_T_82541_2,		WMP_F_COPPER },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1144 	  "Intel i82541GI 1000BASE-T Ethernet",
   1145 	  WM_T_82541_2,		WMP_F_COPPER },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1148 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1149 	  WM_T_82541_2,		WMP_F_COPPER },
   1150 
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1152 	  "Intel i82541PI 1000BASE-T Ethernet",
   1153 	  WM_T_82541_2,		WMP_F_COPPER },
   1154 
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1156 	  "Intel i82547EI 1000BASE-T Ethernet",
   1157 	  WM_T_82547,		WMP_F_COPPER },
   1158 
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1160 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1161 	  WM_T_82547,		WMP_F_COPPER },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1164 	  "Intel i82547GI 1000BASE-T Ethernet",
   1165 	  WM_T_82547_2,		WMP_F_COPPER },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1168 	  "Intel PRO/1000 PT (82571EB)",
   1169 	  WM_T_82571,		WMP_F_COPPER },
   1170 
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1172 	  "Intel PRO/1000 PF (82571EB)",
   1173 	  WM_T_82571,		WMP_F_FIBER },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1176 	  "Intel PRO/1000 PB (82571EB)",
   1177 	  WM_T_82571,		WMP_F_SERDES },
   1178 
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1180 	  "Intel PRO/1000 QT (82571EB)",
   1181 	  WM_T_82571,		WMP_F_COPPER },
   1182 
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1184 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1185 	  WM_T_82571,		WMP_F_COPPER, },
   1186 
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1188 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1189 	  WM_T_82571,		WMP_F_COPPER, },
   1190 
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1192 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1193 	  WM_T_82571,		WMP_F_SERDES, },
   1194 
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1196 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1197 	  WM_T_82571,		WMP_F_SERDES, },
   1198 
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1200 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1201 	  WM_T_82571,		WMP_F_FIBER, },
   1202 
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1204 	  "Intel i82572EI 1000baseT Ethernet",
   1205 	  WM_T_82572,		WMP_F_COPPER },
   1206 
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1208 	  "Intel i82572EI 1000baseX Ethernet",
   1209 	  WM_T_82572,		WMP_F_FIBER },
   1210 
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1212 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1213 	  WM_T_82572,		WMP_F_SERDES },
   1214 
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1216 	  "Intel i82572EI 1000baseT Ethernet",
   1217 	  WM_T_82572,		WMP_F_COPPER },
   1218 
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1220 	  "Intel i82573E",
   1221 	  WM_T_82573,		WMP_F_COPPER },
   1222 
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1224 	  "Intel i82573E IAMT",
   1225 	  WM_T_82573,		WMP_F_COPPER },
   1226 
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1228 	  "Intel i82573L Gigabit Ethernet",
   1229 	  WM_T_82573,		WMP_F_COPPER },
   1230 
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1232 	  "Intel i82574L",
   1233 	  WM_T_82574,		WMP_F_COPPER },
   1234 
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1236 	  "Intel i82574L",
   1237 	  WM_T_82574,		WMP_F_COPPER },
   1238 
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1240 	  "Intel i82583V",
   1241 	  WM_T_82583,		WMP_F_COPPER },
   1242 
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1244 	  "i80003 dual 1000baseT Ethernet",
   1245 	  WM_T_80003,		WMP_F_COPPER },
   1246 
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1248 	  "i80003 dual 1000baseX Ethernet",
   1249 	  WM_T_80003,		WMP_F_COPPER },
   1250 
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1252 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1253 	  WM_T_80003,		WMP_F_SERDES },
   1254 
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1256 	  "Intel i80003 1000baseT Ethernet",
   1257 	  WM_T_80003,		WMP_F_COPPER },
   1258 
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1260 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1261 	  WM_T_80003,		WMP_F_SERDES },
   1262 
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1264 	  "Intel i82801H (M_AMT) LAN Controller",
   1265 	  WM_T_ICH8,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1267 	  "Intel i82801H (AMT) LAN Controller",
   1268 	  WM_T_ICH8,		WMP_F_COPPER },
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1270 	  "Intel i82801H LAN Controller",
   1271 	  WM_T_ICH8,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1273 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1274 	  WM_T_ICH8,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1276 	  "Intel i82801H (M) LAN Controller",
   1277 	  WM_T_ICH8,		WMP_F_COPPER },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1279 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1280 	  WM_T_ICH8,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1282 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1283 	  WM_T_ICH8,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1285 	  "82567V-3 LAN Controller",
   1286 	  WM_T_ICH8,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1288 	  "82801I (AMT) LAN Controller",
   1289 	  WM_T_ICH9,		WMP_F_COPPER },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1291 	  "82801I 10/100 LAN Controller",
   1292 	  WM_T_ICH9,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1294 	  "82801I (G) 10/100 LAN Controller",
   1295 	  WM_T_ICH9,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1297 	  "82801I (GT) 10/100 LAN Controller",
   1298 	  WM_T_ICH9,		WMP_F_COPPER },
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1300 	  "82801I (C) LAN Controller",
   1301 	  WM_T_ICH9,		WMP_F_COPPER },
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1303 	  "82801I mobile LAN Controller",
   1304 	  WM_T_ICH9,		WMP_F_COPPER },
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1306 	  "82801I mobile (V) LAN Controller",
   1307 	  WM_T_ICH9,		WMP_F_COPPER },
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1309 	  "82801I mobile (AMT) LAN Controller",
   1310 	  WM_T_ICH9,		WMP_F_COPPER },
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1312 	  "82567LM-4 LAN Controller",
   1313 	  WM_T_ICH9,		WMP_F_COPPER },
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1315 	  "82567LM-2 LAN Controller",
   1316 	  WM_T_ICH10,		WMP_F_COPPER },
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1318 	  "82567LF-2 LAN Controller",
   1319 	  WM_T_ICH10,		WMP_F_COPPER },
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1321 	  "82567LM-3 LAN Controller",
   1322 	  WM_T_ICH10,		WMP_F_COPPER },
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1324 	  "82567LF-3 LAN Controller",
   1325 	  WM_T_ICH10,		WMP_F_COPPER },
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1327 	  "82567V-2 LAN Controller",
   1328 	  WM_T_ICH10,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1330 	  "82567V-3? LAN Controller",
   1331 	  WM_T_ICH10,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1333 	  "HANKSVILLE LAN Controller",
   1334 	  WM_T_ICH10,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1336 	  "PCH LAN (82577LM) Controller",
   1337 	  WM_T_PCH,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1339 	  "PCH LAN (82577LC) Controller",
   1340 	  WM_T_PCH,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1342 	  "PCH LAN (82578DM) Controller",
   1343 	  WM_T_PCH,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1345 	  "PCH LAN (82578DC) Controller",
   1346 	  WM_T_PCH,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1348 	  "PCH2 LAN (82579LM) Controller",
   1349 	  WM_T_PCH2,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1351 	  "PCH2 LAN (82579V) Controller",
   1352 	  WM_T_PCH2,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1354 	  "82575EB dual-1000baseT Ethernet",
   1355 	  WM_T_82575,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1357 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1358 	  WM_T_82575,		WMP_F_SERDES },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1360 	  "82575GB quad-1000baseT Ethernet",
   1361 	  WM_T_82575,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1363 	  "82575GB quad-1000baseT Ethernet (PM)",
   1364 	  WM_T_82575,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1366 	  "82576 1000BaseT Ethernet",
   1367 	  WM_T_82576,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1369 	  "82576 1000BaseX Ethernet",
   1370 	  WM_T_82576,		WMP_F_FIBER },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1373 	  "82576 gigabit Ethernet (SERDES)",
   1374 	  WM_T_82576,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1377 	  "82576 quad-1000BaseT Ethernet",
   1378 	  WM_T_82576,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1381 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1382 	  WM_T_82576,		WMP_F_COPPER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1385 	  "82576 gigabit Ethernet",
   1386 	  WM_T_82576,		WMP_F_COPPER },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1389 	  "82576 gigabit Ethernet (SERDES)",
   1390 	  WM_T_82576,		WMP_F_SERDES },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1392 	  "82576 quad-gigabit Ethernet (SERDES)",
   1393 	  WM_T_82576,		WMP_F_SERDES },
   1394 
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1396 	  "82580 1000BaseT Ethernet",
   1397 	  WM_T_82580,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1399 	  "82580 1000BaseX Ethernet",
   1400 	  WM_T_82580,		WMP_F_FIBER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1403 	  "82580 1000BaseT Ethernet (SERDES)",
   1404 	  WM_T_82580,		WMP_F_SERDES },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1407 	  "82580 gigabit Ethernet (SGMII)",
   1408 	  WM_T_82580,		WMP_F_COPPER },
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1410 	  "82580 dual-1000BaseT Ethernet",
   1411 	  WM_T_82580,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1414 	  "82580 quad-1000BaseX Ethernet",
   1415 	  WM_T_82580,		WMP_F_FIBER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1418 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1419 	  WM_T_82580,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1422 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1423 	  WM_T_82580,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1426 	  "DH89XXCC 1000BASE-KX Ethernet",
   1427 	  WM_T_82580,		WMP_F_SERDES },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1430 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1431 	  WM_T_82580,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1434 	  "I350 Gigabit Network Connection",
   1435 	  WM_T_I350,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1438 	  "I350 Gigabit Fiber Network Connection",
   1439 	  WM_T_I350,		WMP_F_FIBER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1442 	  "I350 Gigabit Backplane Connection",
   1443 	  WM_T_I350,		WMP_F_SERDES },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1446 	  "I350 Quad Port Gigabit Ethernet",
   1447 	  WM_T_I350,		WMP_F_SERDES },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1450 	  "I350 Gigabit Connection",
   1451 	  WM_T_I350,		WMP_F_COPPER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1454 	  "I354 Gigabit Ethernet (KX)",
   1455 	  WM_T_I354,		WMP_F_SERDES },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1458 	  "I354 Gigabit Ethernet (SGMII)",
   1459 	  WM_T_I354,		WMP_F_COPPER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1462 	  "I354 Gigabit Ethernet (2.5G)",
   1463 	  WM_T_I354,		WMP_F_COPPER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1466 	  "I210-T1 Ethernet Server Adapter",
   1467 	  WM_T_I210,		WMP_F_COPPER },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1470 	  "I210 Ethernet (Copper OEM)",
   1471 	  WM_T_I210,		WMP_F_COPPER },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1474 	  "I210 Ethernet (Copper IT)",
   1475 	  WM_T_I210,		WMP_F_COPPER },
   1476 
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1478 	  "I210 Ethernet (Copper, FLASH less)",
   1479 	  WM_T_I210,		WMP_F_COPPER },
   1480 
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1482 	  "I210 Gigabit Ethernet (Fiber)",
   1483 	  WM_T_I210,		WMP_F_FIBER },
   1484 
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1486 	  "I210 Gigabit Ethernet (SERDES)",
   1487 	  WM_T_I210,		WMP_F_SERDES },
   1488 
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1490 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1491 	  WM_T_I210,		WMP_F_SERDES },
   1492 
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1494 	  "I210 Gigabit Ethernet (SGMII)",
   1495 	  WM_T_I210,		WMP_F_COPPER },
   1496 
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1498 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1499 	  WM_T_I210,		WMP_F_COPPER },
   1500 
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1502 	  "I211 Ethernet (COPPER)",
   1503 	  WM_T_I211,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1505 	  "I217 V Ethernet Connection",
   1506 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1508 	  "I217 LM Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1511 	  "I218 V Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1517 	  "I218 V Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1520 	  "I218 LM Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1526 	  "I218 LM Ethernet Connection",
   1527 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1529 	  "I219 V Ethernet Connection",
   1530 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1532 	  "I219 V Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1535 	  "I219 V Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1538 	  "I219 V Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1553 	  "I219 LM Ethernet Connection",
   1554 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1556 	  "I219 V Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1559 	  "I219 V Ethernet Connection",
   1560 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1565 	  "I219 LM Ethernet Connection",
   1566 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1567 	{ 0,			0,
   1568 	  NULL,
   1569 	  0,			0 },
   1570 };
   1571 
   1572 /*
   1573  * Register read/write functions.
   1574  * Other than CSR_{READ|WRITE}().
   1575  */
   1576 
   1577 #if 0 /* Not currently used */
   1578 static inline uint32_t
   1579 wm_io_read(struct wm_softc *sc, int reg)
   1580 {
   1581 
   1582 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1583 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1584 }
   1585 #endif
   1586 
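         /*
          * A note on the indirect access pattern used here (assuming the
          * usual 8254x I/O-mapped scheme): the I/O BAR is a window pair
          * where a write to BAR offset 0 selects a CSR offset and BAR
          * offset 4 then accesses that CSR, as done for reads by
          * wm_io_read() above and for writes by wm_io_write() below.
          */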
   1587 static inline void
   1588 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1589 {
   1590 
   1591 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1592 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1593 }
   1594 
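         /*
          * Write a byte-wide field of an 82575 8-bit controller register
          * (e.g. SCTL): the data goes in the low bits and the byte offset
          * in the address field, then the hardware sets a READY bit once
          * the write has been accepted.  We poll for READY, 5us per
          * iteration, up to SCTL_CTL_POLL_TIMEOUT times.
          */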
   1595 static inline void
   1596 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1597     uint32_t data)
   1598 {
   1599 	uint32_t regval;
   1600 	int i;
   1601 
   1602 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1603 
   1604 	CSR_WRITE(sc, reg, regval);
   1605 
   1606 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1607 		delay(5);
   1608 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1609 			break;
   1610 	}
   1611 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1612 		aprint_error("%s: WARNING:"
   1613 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1614 		    device_xname(sc->sc_dev), reg);
   1615 	}
   1616 }
   1617 
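         /*
          * Split a bus address into the little-endian low/high words of a
          * wiseman-style descriptor address.  The sizeof() test is a
          * compile-time constant, so on 32-bit bus_addr_t platforms the
          * 64-bit branch (and its shift) is optimized away.
          */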
   1618 static inline void
   1619 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1620 {
   1621 	wa->wa_low = htole32(v & 0xffffffffU);
   1622 	if (sizeof(bus_addr_t) == 8)
   1623 		wa->wa_high = htole32((uint64_t) v >> 32);
   1624 	else
   1625 		wa->wa_high = 0;
   1626 }
   1627 
   1628 /*
   1629  * Descriptor sync/init functions.
   1630  */
   1631 static inline void
   1632 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1633 {
   1634 	struct wm_softc *sc = txq->txq_sc;
   1635 
   1636 	/* If it will wrap around, sync to the end of the ring. */
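         	/*
         	 * For example, with a 256-descriptor ring, start == 250 and
         	 * num == 10 syncs descriptors 250-255 here and 0-3 below.
         	 */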
   1637 	if ((start + num) > WM_NTXDESC(txq)) {
   1638 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1639 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1640 		    (WM_NTXDESC(txq) - start), ops);
   1641 		num -= (WM_NTXDESC(txq) - start);
   1642 		start = 0;
   1643 	}
   1644 
   1645 	/* Now sync whatever is left. */
   1646 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1647 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1648 }
   1649 
   1650 static inline void
   1651 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1652 {
   1653 	struct wm_softc *sc = rxq->rxq_sc;
   1654 
   1655 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1656 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1657 }
   1658 
   1659 static inline void
   1660 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1661 {
   1662 	struct wm_softc *sc = rxq->rxq_sc;
   1663 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1664 	struct mbuf *m = rxs->rxs_mbuf;
   1665 
   1666 	/*
   1667 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1668 	 * so that the payload after the Ethernet header is aligned
   1669 	 * to a 4-byte boundary.
    1670 	 *
   1671 	 * XXX BRAINDAMAGE ALERT!
   1672 	 * The stupid chip uses the same size for every buffer, which
   1673 	 * is set in the Receive Control register.  We are using the 2K
   1674 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1675 	 * reason, we can't "scoot" packets longer than the standard
   1676 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1677 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1678 	 * the upper layer copy the headers.
   1679 	 */
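         	/*
         	 * The 2-byte scoot works because the Ethernet header is 14
         	 * bytes long: 2 + 14 == 16, so the payload that follows the
         	 * header starts on a 4-byte (in fact 16-byte) boundary.
         	 */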
   1680 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1681 
   1682 	if (sc->sc_type == WM_T_82574) {
   1683 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1684 		rxd->erx_data.erxd_addr =
   1685 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1686 		rxd->erx_data.erxd_dd = 0;
   1687 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1688 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1689 
   1690 		rxd->nqrx_data.nrxd_paddr =
   1691 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1692 		/* Currently, split header is not supported. */
   1693 		rxd->nqrx_data.nrxd_haddr = 0;
   1694 	} else {
   1695 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1696 
   1697 		wm_set_dma_addr(&rxd->wrx_addr,
   1698 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1699 		rxd->wrx_len = 0;
   1700 		rxd->wrx_cksum = 0;
   1701 		rxd->wrx_status = 0;
   1702 		rxd->wrx_errors = 0;
   1703 		rxd->wrx_special = 0;
   1704 	}
   1705 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1706 
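         	/*
         	 * Advancing the receive descriptor tail (RDT) hands this
         	 * descriptor back to the hardware.
         	 */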
   1707 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1708 }
   1709 
   1710 /*
   1711  * Device driver interface functions and commonly used functions.
   1712  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1713  */
   1714 
   1715 /* Lookup supported device table */
   1716 static const struct wm_product *
   1717 wm_lookup(const struct pci_attach_args *pa)
   1718 {
   1719 	const struct wm_product *wmp;
   1720 
   1721 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1722 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1723 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1724 			return wmp;
   1725 	}
   1726 	return NULL;
   1727 }
   1728 
   1729 /* The match function (ca_match) */
   1730 static int
   1731 wm_match(device_t parent, cfdata_t cf, void *aux)
   1732 {
   1733 	struct pci_attach_args *pa = aux;
   1734 
   1735 	if (wm_lookup(pa) != NULL)
   1736 		return 1;
   1737 
   1738 	return 0;
   1739 }
   1740 
   1741 /* The attach function (ca_attach) */
   1742 static void
   1743 wm_attach(device_t parent, device_t self, void *aux)
   1744 {
   1745 	struct wm_softc *sc = device_private(self);
   1746 	struct pci_attach_args *pa = aux;
   1747 	prop_dictionary_t dict;
   1748 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1749 	pci_chipset_tag_t pc = pa->pa_pc;
   1750 	int counts[PCI_INTR_TYPE_SIZE];
   1751 	pci_intr_type_t max_type;
   1752 	const char *eetype, *xname;
   1753 	bus_space_tag_t memt;
   1754 	bus_space_handle_t memh;
   1755 	bus_size_t memsize;
   1756 	int memh_valid;
   1757 	int i, error;
   1758 	const struct wm_product *wmp;
   1759 	prop_data_t ea;
   1760 	prop_number_t pn;
   1761 	uint8_t enaddr[ETHER_ADDR_LEN];
   1762 	char buf[256];
   1763 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1764 	pcireg_t preg, memtype;
   1765 	uint16_t eeprom_data, apme_mask;
   1766 	bool force_clear_smbi;
   1767 	uint32_t link_mode;
   1768 	uint32_t reg;
   1769 
   1770 	sc->sc_dev = self;
   1771 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1772 	sc->sc_core_stopping = false;
   1773 
   1774 	wmp = wm_lookup(pa);
   1775 #ifdef DIAGNOSTIC
   1776 	if (wmp == NULL) {
   1777 		printf("\n");
   1778 		panic("wm_attach: impossible");
   1779 	}
   1780 #endif
   1781 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1782 
   1783 	sc->sc_pc = pa->pa_pc;
   1784 	sc->sc_pcitag = pa->pa_tag;
   1785 
   1786 	if (pci_dma64_available(pa))
   1787 		sc->sc_dmat = pa->pa_dmat64;
   1788 	else
   1789 		sc->sc_dmat = pa->pa_dmat;
   1790 
   1791 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
    1792 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1793 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1794 
   1795 	sc->sc_type = wmp->wmp_type;
   1796 
   1797 	/* Set default function pointers */
   1798 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1799 	sc->phy.release = sc->nvm.release = wm_put_null;
   1800 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1801 
   1802 	if (sc->sc_type < WM_T_82543) {
   1803 		if (sc->sc_rev < 2) {
   1804 			aprint_error_dev(sc->sc_dev,
   1805 			    "i82542 must be at least rev. 2\n");
   1806 			return;
   1807 		}
   1808 		if (sc->sc_rev < 3)
   1809 			sc->sc_type = WM_T_82542_2_0;
   1810 	}
   1811 
   1812 	/*
   1813 	 * Disable MSI for Errata:
   1814 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1815 	 *
   1816 	 *  82544: Errata 25
   1817 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1818 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1819 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1820 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1821 	 *
   1822 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1823 	 *
   1824 	 *  82571 & 82572: Errata 63
   1825 	 */
   1826 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1827 	    || (sc->sc_type == WM_T_82572))
   1828 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1829 
   1830 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1831 	    || (sc->sc_type == WM_T_82580)
   1832 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1833 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1834 		sc->sc_flags |= WM_F_NEWQUEUE;
   1835 
   1836 	/* Set device properties (mactype) */
   1837 	dict = device_properties(sc->sc_dev);
   1838 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1839 
   1840 	/*
    1841 	 * Map the device.  All devices support memory-mapped access,
   1842 	 * and it is really required for normal operation.
   1843 	 */
   1844 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1845 	switch (memtype) {
   1846 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1847 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1848 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1849 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1850 		break;
   1851 	default:
   1852 		memh_valid = 0;
   1853 		break;
   1854 	}
   1855 
   1856 	if (memh_valid) {
   1857 		sc->sc_st = memt;
   1858 		sc->sc_sh = memh;
   1859 		sc->sc_ss = memsize;
   1860 	} else {
   1861 		aprint_error_dev(sc->sc_dev,
   1862 		    "unable to map device registers\n");
   1863 		return;
   1864 	}
   1865 
   1866 	/*
   1867 	 * In addition, i82544 and later support I/O mapped indirect
   1868 	 * register access.  It is not desirable (nor supported in
   1869 	 * this driver) to use it for normal operation, though it is
   1870 	 * required to work around bugs in some chip versions.
   1871 	 */
   1872 	if (sc->sc_type >= WM_T_82544) {
   1873 		/* First we have to find the I/O BAR. */
   1874 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1875 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1876 			if (memtype == PCI_MAPREG_TYPE_IO)
   1877 				break;
   1878 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1879 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1880 				i += 4;	/* skip high bits, too */
   1881 		}
   1882 		if (i < PCI_MAPREG_END) {
   1883 			/*
    1884 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1885 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1886 			 * That's no problem, because those newer chips don't
    1887 			 * have this bug.
    1888 			 *
    1889 			 * The i8254x apparently doesn't respond when the
    1890 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1891 			 * been configured.
   1892 			 */
   1893 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1894 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1895 				aprint_error_dev(sc->sc_dev,
   1896 				    "WARNING: I/O BAR at zero.\n");
   1897 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1898 					0, &sc->sc_iot, &sc->sc_ioh,
   1899 					NULL, &sc->sc_ios) == 0) {
   1900 				sc->sc_flags |= WM_F_IOH_VALID;
   1901 			} else
   1902 				aprint_error_dev(sc->sc_dev,
   1903 				    "WARNING: unable to map I/O space\n");
   1904 		}
   1905 
   1906 	}
   1907 
   1908 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1909 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1910 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1911 	if (sc->sc_type < WM_T_82542_2_1)
   1912 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1913 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1914 
   1915 	/* power up chip */
   1916 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1917 	    && error != EOPNOTSUPP) {
   1918 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1919 		return;
   1920 	}
   1921 
   1922 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1923 	/*
    1924 	 * Don't use MSI-X if we can use only one queue, to save interrupt
    1925 	 * resources.
   1926 	 */
   1927 	if (sc->sc_nqueues > 1) {
   1928 		max_type = PCI_INTR_TYPE_MSIX;
   1929 		/*
    1930 		 * The 82583 has an MSI-X capability in the PCI configuration
    1931 		 * space, but it doesn't support it. At least the datasheet
    1932 		 * doesn't say anything about MSI-X.
   1933 		 */
   1934 		counts[PCI_INTR_TYPE_MSIX]
   1935 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1936 	} else {
   1937 		max_type = PCI_INTR_TYPE_MSI;
   1938 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1939 	}
   1940 
   1941 	/* Allocation settings */
   1942 	counts[PCI_INTR_TYPE_MSI] = 1;
   1943 	counts[PCI_INTR_TYPE_INTX] = 1;
   1944 	/* overridden by disable flags */
   1945 	if (wm_disable_msi != 0) {
   1946 		counts[PCI_INTR_TYPE_MSI] = 0;
   1947 		if (wm_disable_msix != 0) {
   1948 			max_type = PCI_INTR_TYPE_INTX;
   1949 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1950 		}
   1951 	} else if (wm_disable_msix != 0) {
   1952 		max_type = PCI_INTR_TYPE_MSI;
   1953 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1954 	}
   1955 
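         	/*
         	 * Fall back one interrupt type at a time: if MSI-X setup
         	 * fails, release it and retry with MSI; if MSI setup fails,
         	 * retry with INTx.  Each retry re-enters alloc_retry with
         	 * max_type and counts[] adjusted accordingly.
         	 */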
   1956 alloc_retry:
   1957 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1958 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1959 		return;
   1960 	}
   1961 
   1962 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1963 		error = wm_setup_msix(sc);
   1964 		if (error) {
   1965 			pci_intr_release(pc, sc->sc_intrs,
   1966 			    counts[PCI_INTR_TYPE_MSIX]);
   1967 
   1968 			/* Setup for MSI: Disable MSI-X */
   1969 			max_type = PCI_INTR_TYPE_MSI;
   1970 			counts[PCI_INTR_TYPE_MSI] = 1;
   1971 			counts[PCI_INTR_TYPE_INTX] = 1;
   1972 			goto alloc_retry;
   1973 		}
   1974 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1975 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1976 		error = wm_setup_legacy(sc);
   1977 		if (error) {
   1978 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1979 			    counts[PCI_INTR_TYPE_MSI]);
   1980 
   1981 			/* The next try is for INTx: Disable MSI */
   1982 			max_type = PCI_INTR_TYPE_INTX;
   1983 			counts[PCI_INTR_TYPE_INTX] = 1;
   1984 			goto alloc_retry;
   1985 		}
   1986 	} else {
   1987 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1988 		error = wm_setup_legacy(sc);
   1989 		if (error) {
   1990 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1991 			    counts[PCI_INTR_TYPE_INTX]);
   1992 			return;
   1993 		}
   1994 	}
   1995 
   1996 	/*
   1997 	 * Check the function ID (unit number of the chip).
   1998 	 */
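         	/*
         	 * On a dual-port chip such as the 82546, function 0 is port A
         	 * and function 1 is port B; the per-port NVM offsets read
         	 * later (e.g. NVM_OFF_CFG3_PORTB) are selected with this ID.
         	 */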
   1999 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2000 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2001 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2002 	    || (sc->sc_type == WM_T_82580)
   2003 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2004 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2005 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2006 	else
   2007 		sc->sc_funcid = 0;
   2008 
   2009 	/*
   2010 	 * Determine a few things about the bus we're connected to.
   2011 	 */
   2012 	if (sc->sc_type < WM_T_82543) {
   2013 		/* We don't really know the bus characteristics here. */
   2014 		sc->sc_bus_speed = 33;
   2015 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2016 		/*
   2017 		 * CSA (Communication Streaming Architecture) is about as fast
    2018 		 * as a 32-bit 66MHz PCI bus.
   2019 		 */
   2020 		sc->sc_flags |= WM_F_CSA;
   2021 		sc->sc_bus_speed = 66;
   2022 		aprint_verbose_dev(sc->sc_dev,
   2023 		    "Communication Streaming Architecture\n");
   2024 		if (sc->sc_type == WM_T_82547) {
   2025 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2026 			callout_setfunc(&sc->sc_txfifo_ch,
   2027 			    wm_82547_txfifo_stall, sc);
   2028 			aprint_verbose_dev(sc->sc_dev,
   2029 			    "using 82547 Tx FIFO stall work-around\n");
   2030 		}
   2031 	} else if (sc->sc_type >= WM_T_82571) {
   2032 		sc->sc_flags |= WM_F_PCIE;
   2033 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2034 		    && (sc->sc_type != WM_T_ICH10)
   2035 		    && (sc->sc_type != WM_T_PCH)
   2036 		    && (sc->sc_type != WM_T_PCH2)
   2037 		    && (sc->sc_type != WM_T_PCH_LPT)
   2038 		    && (sc->sc_type != WM_T_PCH_SPT)
   2039 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2040 			/* ICH* and PCH* have no PCIe capability registers */
   2041 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2042 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2043 				NULL) == 0)
   2044 				aprint_error_dev(sc->sc_dev,
   2045 				    "unable to find PCIe capability\n");
   2046 		}
   2047 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2048 	} else {
   2049 		reg = CSR_READ(sc, WMREG_STATUS);
   2050 		if (reg & STATUS_BUS64)
   2051 			sc->sc_flags |= WM_F_BUS64;
   2052 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2053 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2054 
   2055 			sc->sc_flags |= WM_F_PCIX;
   2056 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2057 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2058 				aprint_error_dev(sc->sc_dev,
   2059 				    "unable to find PCIX capability\n");
   2060 			else if (sc->sc_type != WM_T_82545_3 &&
   2061 				 sc->sc_type != WM_T_82546_3) {
   2062 				/*
   2063 				 * Work around a problem caused by the BIOS
   2064 				 * setting the max memory read byte count
   2065 				 * incorrectly.
   2066 				 */
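         				/*
         				 * MMRBC is encoded as 512 << n:
         				 * 0 = 512, 1 = 1024, 2 = 2048 and
         				 * 3 = 4096 bytes.
         				 */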
   2067 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2068 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2069 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2070 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2071 
   2072 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2073 				    PCIX_CMD_BYTECNT_SHIFT;
   2074 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2075 				    PCIX_STATUS_MAXB_SHIFT;
   2076 				if (bytecnt > maxb) {
   2077 					aprint_verbose_dev(sc->sc_dev,
   2078 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2079 					    512 << bytecnt, 512 << maxb);
   2080 					pcix_cmd = (pcix_cmd &
   2081 					    ~PCIX_CMD_BYTECNT_MASK) |
   2082 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2083 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2084 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2085 					    pcix_cmd);
   2086 				}
   2087 			}
   2088 		}
   2089 		/*
   2090 		 * The quad port adapter is special; it has a PCIX-PCIX
   2091 		 * bridge on the board, and can run the secondary bus at
   2092 		 * a higher speed.
   2093 		 */
   2094 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2095 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2096 								      : 66;
   2097 		} else if (sc->sc_flags & WM_F_PCIX) {
   2098 			switch (reg & STATUS_PCIXSPD_MASK) {
   2099 			case STATUS_PCIXSPD_50_66:
   2100 				sc->sc_bus_speed = 66;
   2101 				break;
   2102 			case STATUS_PCIXSPD_66_100:
   2103 				sc->sc_bus_speed = 100;
   2104 				break;
   2105 			case STATUS_PCIXSPD_100_133:
   2106 				sc->sc_bus_speed = 133;
   2107 				break;
   2108 			default:
   2109 				aprint_error_dev(sc->sc_dev,
   2110 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2111 				    reg & STATUS_PCIXSPD_MASK);
   2112 				sc->sc_bus_speed = 66;
   2113 				break;
   2114 			}
   2115 		} else
   2116 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2117 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2118 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2119 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2120 	}
   2121 
   2122 	/* clear interesting stat counters */
   2123 	CSR_READ(sc, WMREG_COLC);
   2124 	CSR_READ(sc, WMREG_RXERRC);
   2125 
   2126 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2127 	    || (sc->sc_type >= WM_T_ICH8))
   2128 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2129 	if (sc->sc_type >= WM_T_ICH8)
   2130 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2131 
   2132 	/* Set PHY, NVM mutex related stuff */
   2133 	switch (sc->sc_type) {
   2134 	case WM_T_82542_2_0:
   2135 	case WM_T_82542_2_1:
   2136 	case WM_T_82543:
   2137 	case WM_T_82544:
   2138 		/* Microwire */
   2139 		sc->nvm.read = wm_nvm_read_uwire;
   2140 		sc->sc_nvm_wordsize = 64;
   2141 		sc->sc_nvm_addrbits = 6;
   2142 		break;
   2143 	case WM_T_82540:
   2144 	case WM_T_82545:
   2145 	case WM_T_82545_3:
   2146 	case WM_T_82546:
   2147 	case WM_T_82546_3:
   2148 		/* Microwire */
   2149 		sc->nvm.read = wm_nvm_read_uwire;
   2150 		reg = CSR_READ(sc, WMREG_EECD);
   2151 		if (reg & EECD_EE_SIZE) {
   2152 			sc->sc_nvm_wordsize = 256;
   2153 			sc->sc_nvm_addrbits = 8;
   2154 		} else {
   2155 			sc->sc_nvm_wordsize = 64;
   2156 			sc->sc_nvm_addrbits = 6;
   2157 		}
   2158 		sc->sc_flags |= WM_F_LOCK_EECD;
   2159 		sc->nvm.acquire = wm_get_eecd;
   2160 		sc->nvm.release = wm_put_eecd;
   2161 		break;
   2162 	case WM_T_82541:
   2163 	case WM_T_82541_2:
   2164 	case WM_T_82547:
   2165 	case WM_T_82547_2:
   2166 		reg = CSR_READ(sc, WMREG_EECD);
   2167 		/*
    2168 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2169 		 * 8254[17], so set the flags and functions before calling it.
   2170 		 */
   2171 		sc->sc_flags |= WM_F_LOCK_EECD;
   2172 		sc->nvm.acquire = wm_get_eecd;
   2173 		sc->nvm.release = wm_put_eecd;
   2174 		if (reg & EECD_EE_TYPE) {
   2175 			/* SPI */
   2176 			sc->nvm.read = wm_nvm_read_spi;
   2177 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2178 			wm_nvm_set_addrbits_size_eecd(sc);
   2179 		} else {
   2180 			/* Microwire */
   2181 			sc->nvm.read = wm_nvm_read_uwire;
   2182 			if ((reg & EECD_EE_ABITS) != 0) {
   2183 				sc->sc_nvm_wordsize = 256;
   2184 				sc->sc_nvm_addrbits = 8;
   2185 			} else {
   2186 				sc->sc_nvm_wordsize = 64;
   2187 				sc->sc_nvm_addrbits = 6;
   2188 			}
   2189 		}
   2190 		break;
   2191 	case WM_T_82571:
   2192 	case WM_T_82572:
   2193 		/* SPI */
   2194 		sc->nvm.read = wm_nvm_read_eerd;
    2195 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2196 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2197 		wm_nvm_set_addrbits_size_eecd(sc);
   2198 		sc->phy.acquire = wm_get_swsm_semaphore;
   2199 		sc->phy.release = wm_put_swsm_semaphore;
   2200 		sc->nvm.acquire = wm_get_nvm_82571;
   2201 		sc->nvm.release = wm_put_nvm_82571;
   2202 		break;
   2203 	case WM_T_82573:
   2204 	case WM_T_82574:
   2205 	case WM_T_82583:
   2206 		sc->nvm.read = wm_nvm_read_eerd;
    2207 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2208 		if (sc->sc_type == WM_T_82573) {
   2209 			sc->phy.acquire = wm_get_swsm_semaphore;
   2210 			sc->phy.release = wm_put_swsm_semaphore;
   2211 			sc->nvm.acquire = wm_get_nvm_82571;
   2212 			sc->nvm.release = wm_put_nvm_82571;
   2213 		} else {
   2214 			/* Both PHY and NVM use the same semaphore. */
   2215 			sc->phy.acquire = sc->nvm.acquire
   2216 			    = wm_get_swfwhw_semaphore;
   2217 			sc->phy.release = sc->nvm.release
   2218 			    = wm_put_swfwhw_semaphore;
   2219 		}
   2220 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2221 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2222 			sc->sc_nvm_wordsize = 2048;
   2223 		} else {
   2224 			/* SPI */
   2225 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2226 			wm_nvm_set_addrbits_size_eecd(sc);
   2227 		}
   2228 		break;
   2229 	case WM_T_82575:
   2230 	case WM_T_82576:
   2231 	case WM_T_82580:
   2232 	case WM_T_I350:
   2233 	case WM_T_I354:
   2234 	case WM_T_80003:
   2235 		/* SPI */
   2236 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2237 		wm_nvm_set_addrbits_size_eecd(sc);
   2238 		if ((sc->sc_type == WM_T_80003)
   2239 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2240 			sc->nvm.read = wm_nvm_read_eerd;
   2241 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2242 		} else {
   2243 			sc->nvm.read = wm_nvm_read_spi;
   2244 			sc->sc_flags |= WM_F_LOCK_EECD;
   2245 		}
   2246 		sc->phy.acquire = wm_get_phy_82575;
   2247 		sc->phy.release = wm_put_phy_82575;
   2248 		sc->nvm.acquire = wm_get_nvm_80003;
   2249 		sc->nvm.release = wm_put_nvm_80003;
   2250 		break;
   2251 	case WM_T_ICH8:
   2252 	case WM_T_ICH9:
   2253 	case WM_T_ICH10:
   2254 	case WM_T_PCH:
   2255 	case WM_T_PCH2:
   2256 	case WM_T_PCH_LPT:
   2257 		sc->nvm.read = wm_nvm_read_ich8;
   2258 		/* FLASH */
   2259 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2260 		sc->sc_nvm_wordsize = 2048;
    2261 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   2262 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2263 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2264 			aprint_error_dev(sc->sc_dev,
   2265 			    "can't map FLASH registers\n");
   2266 			goto out;
   2267 		}
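         		/*
         		 * GFPREG describes the GbE region of the flash in
         		 * sector units: the base in the low bits and the limit
         		 * in bits 16 and up.  The computation below turns that
         		 * into a byte base, then into a per-bank size in 16-bit
         		 * words (the region holds two banks).
         		 */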
   2268 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2269 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2270 		    ICH_FLASH_SECTOR_SIZE;
   2271 		sc->sc_ich8_flash_bank_size =
   2272 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2273 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2274 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2275 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2276 		sc->sc_flashreg_offset = 0;
   2277 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2278 		sc->phy.release = wm_put_swflag_ich8lan;
   2279 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2280 		sc->nvm.release = wm_put_nvm_ich8lan;
   2281 		break;
   2282 	case WM_T_PCH_SPT:
   2283 	case WM_T_PCH_CNP:
   2284 		sc->nvm.read = wm_nvm_read_spt;
   2285 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2286 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2287 		sc->sc_flasht = sc->sc_st;
   2288 		sc->sc_flashh = sc->sc_sh;
   2289 		sc->sc_ich8_flash_base = 0;
   2290 		sc->sc_nvm_wordsize =
   2291 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2292 		    * NVM_SIZE_MULTIPLIER;
    2293 		/* It's the size in bytes; we want words */
   2294 		sc->sc_nvm_wordsize /= 2;
   2295 		/* assume 2 banks */
   2296 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2297 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2298 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2299 		sc->phy.release = wm_put_swflag_ich8lan;
   2300 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2301 		sc->nvm.release = wm_put_nvm_ich8lan;
   2302 		break;
   2303 	case WM_T_I210:
   2304 	case WM_T_I211:
    2305 		/* Allow single clear of SW semaphore on I210 and newer */
   2306 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2307 		if (wm_nvm_flash_presence_i210(sc)) {
   2308 			sc->nvm.read = wm_nvm_read_eerd;
   2309 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2310 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2311 			wm_nvm_set_addrbits_size_eecd(sc);
   2312 		} else {
   2313 			sc->nvm.read = wm_nvm_read_invm;
   2314 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2315 			sc->sc_nvm_wordsize = INVM_SIZE;
   2316 		}
   2317 		sc->phy.acquire = wm_get_phy_82575;
   2318 		sc->phy.release = wm_put_phy_82575;
   2319 		sc->nvm.acquire = wm_get_nvm_80003;
   2320 		sc->nvm.release = wm_put_nvm_80003;
   2321 		break;
   2322 	default:
   2323 		break;
   2324 	}
   2325 
   2326 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2327 	switch (sc->sc_type) {
   2328 	case WM_T_82571:
   2329 	case WM_T_82572:
   2330 		reg = CSR_READ(sc, WMREG_SWSM2);
   2331 		if ((reg & SWSM2_LOCK) == 0) {
   2332 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2333 			force_clear_smbi = true;
   2334 		} else
   2335 			force_clear_smbi = false;
   2336 		break;
   2337 	case WM_T_82573:
   2338 	case WM_T_82574:
   2339 	case WM_T_82583:
   2340 		force_clear_smbi = true;
   2341 		break;
   2342 	default:
   2343 		force_clear_smbi = false;
   2344 		break;
   2345 	}
   2346 	if (force_clear_smbi) {
   2347 		reg = CSR_READ(sc, WMREG_SWSM);
   2348 		if ((reg & SWSM_SMBI) != 0)
   2349 			aprint_error_dev(sc->sc_dev,
   2350 			    "Please update the Bootagent\n");
   2351 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2352 	}
   2353 
   2354 	/*
    2355 	 * Defer printing the EEPROM type until after verifying the checksum.
   2356 	 * This allows the EEPROM type to be printed correctly in the case
   2357 	 * that no EEPROM is attached.
   2358 	 */
   2359 	/*
   2360 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2361 	 * this for later, so we can fail future reads from the EEPROM.
   2362 	 */
   2363 	if (wm_nvm_validate_checksum(sc)) {
   2364 		/*
    2365 		 * Retry, because some PCI-e parts fail the first check
    2366 		 * due to the link being in a sleep state.
   2367 		 */
   2368 		if (wm_nvm_validate_checksum(sc))
   2369 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2370 	}
   2371 
   2372 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2373 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2374 	else {
   2375 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2376 		    sc->sc_nvm_wordsize);
   2377 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2378 			aprint_verbose("iNVM");
   2379 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2380 			aprint_verbose("FLASH(HW)");
   2381 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2382 			aprint_verbose("FLASH");
   2383 		else {
   2384 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2385 				eetype = "SPI";
   2386 			else
   2387 				eetype = "MicroWire";
   2388 			aprint_verbose("(%d address bits) %s EEPROM",
   2389 			    sc->sc_nvm_addrbits, eetype);
   2390 		}
   2391 	}
   2392 	wm_nvm_version(sc);
   2393 	aprint_verbose("\n");
   2394 
   2395 	/*
    2396 	 * XXX This is the first call to wm_gmii_setup_phytype. The result
    2397 	 * might be incorrect.
   2398 	 */
   2399 	wm_gmii_setup_phytype(sc, 0, 0);
   2400 
   2401 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2402 	switch (sc->sc_type) {
   2403 	case WM_T_ICH8:
   2404 	case WM_T_ICH9:
   2405 	case WM_T_ICH10:
   2406 	case WM_T_PCH:
   2407 	case WM_T_PCH2:
   2408 	case WM_T_PCH_LPT:
   2409 	case WM_T_PCH_SPT:
   2410 	case WM_T_PCH_CNP:
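         		/*
         		 * On ICH/PCH parts the APM enable bit lives in the WUC
         		 * register rather than in an NVM config word.
         		 */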
   2411 		apme_mask = WUC_APME;
   2412 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2413 		if ((eeprom_data & apme_mask) != 0)
   2414 			sc->sc_flags |= WM_F_WOL;
   2415 		break;
   2416 	default:
   2417 		break;
   2418 	}
   2419 
   2420 	/* Reset the chip to a known state. */
   2421 	wm_reset(sc);
   2422 
   2423 	/*
   2424 	 * Check for I21[01] PLL workaround.
   2425 	 *
   2426 	 * Three cases:
   2427 	 * a) Chip is I211.
   2428 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2429 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2430 	 */
   2431 	if (sc->sc_type == WM_T_I211)
   2432 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2433 	if (sc->sc_type == WM_T_I210) {
   2434 		if (!wm_nvm_flash_presence_i210(sc))
   2435 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2436 		else if ((sc->sc_nvm_ver_major < 3)
   2437 		    || ((sc->sc_nvm_ver_major == 3)
   2438 			&& (sc->sc_nvm_ver_minor < 25))) {
   2439 			aprint_verbose_dev(sc->sc_dev,
   2440 			    "ROM image version %d.%d is older than 3.25\n",
   2441 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2442 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2443 		}
   2444 	}
   2445 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2446 		wm_pll_workaround_i210(sc);
   2447 
   2448 	wm_get_wakeup(sc);
   2449 
   2450 	/* Non-AMT based hardware can now take control from firmware */
   2451 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2452 		wm_get_hw_control(sc);
   2453 
   2454 	/*
    2455 	 * Read the Ethernet address from the EEPROM, unless it was
    2456 	 * already found in the device properties.
   2457 	 */
   2458 	ea = prop_dictionary_get(dict, "mac-address");
   2459 	if (ea != NULL) {
   2460 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2461 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2462 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2463 	} else {
   2464 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2465 			aprint_error_dev(sc->sc_dev,
   2466 			    "unable to read Ethernet address\n");
   2467 			goto out;
   2468 		}
   2469 	}
   2470 
   2471 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2472 	    ether_sprintf(enaddr));
   2473 
   2474 	/*
   2475 	 * Read the config info from the EEPROM, and set up various
   2476 	 * bits in the control registers based on their contents.
   2477 	 */
   2478 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2479 	if (pn != NULL) {
   2480 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2481 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2482 	} else {
   2483 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2484 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2485 			goto out;
   2486 		}
   2487 	}
   2488 
   2489 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2490 	if (pn != NULL) {
   2491 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2492 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2493 	} else {
   2494 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2495 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2496 			goto out;
   2497 		}
   2498 	}
   2499 
   2500 	/* check for WM_F_WOL */
   2501 	switch (sc->sc_type) {
   2502 	case WM_T_82542_2_0:
   2503 	case WM_T_82542_2_1:
   2504 	case WM_T_82543:
   2505 		/* dummy? */
   2506 		eeprom_data = 0;
   2507 		apme_mask = NVM_CFG3_APME;
   2508 		break;
   2509 	case WM_T_82544:
   2510 		apme_mask = NVM_CFG2_82544_APM_EN;
   2511 		eeprom_data = cfg2;
   2512 		break;
   2513 	case WM_T_82546:
   2514 	case WM_T_82546_3:
   2515 	case WM_T_82571:
   2516 	case WM_T_82572:
   2517 	case WM_T_82573:
   2518 	case WM_T_82574:
   2519 	case WM_T_82583:
   2520 	case WM_T_80003:
   2521 	case WM_T_82575:
   2522 	case WM_T_82576:
   2523 		apme_mask = NVM_CFG3_APME;
   2524 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2525 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2526 		break;
   2527 	case WM_T_82580:
   2528 	case WM_T_I350:
   2529 	case WM_T_I354:
   2530 	case WM_T_I210:
   2531 	case WM_T_I211:
   2532 		apme_mask = NVM_CFG3_APME;
   2533 		wm_nvm_read(sc,
   2534 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2535 		    1, &eeprom_data);
   2536 		break;
   2537 	case WM_T_ICH8:
   2538 	case WM_T_ICH9:
   2539 	case WM_T_ICH10:
   2540 	case WM_T_PCH:
   2541 	case WM_T_PCH2:
   2542 	case WM_T_PCH_LPT:
   2543 	case WM_T_PCH_SPT:
   2544 	case WM_T_PCH_CNP:
    2545 		/* Already checked before wm_reset() */
   2546 		apme_mask = eeprom_data = 0;
   2547 		break;
   2548 	default: /* XXX 82540 */
   2549 		apme_mask = NVM_CFG3_APME;
   2550 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2551 		break;
   2552 	}
   2553 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2554 	if ((eeprom_data & apme_mask) != 0)
   2555 		sc->sc_flags |= WM_F_WOL;
   2556 
   2557 	/*
    2558 	 * We have the EEPROM settings; now apply the special cases
    2559 	 * where the EEPROM may be wrong or the board won't support
    2560 	 * wake on LAN on a particular port.
   2561 	 */
   2562 	switch (sc->sc_pcidevid) {
   2563 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2564 		sc->sc_flags &= ~WM_F_WOL;
   2565 		break;
   2566 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2567 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2568 		/* Wake events only supported on port A for dual fiber
   2569 		 * regardless of eeprom setting */
   2570 		if (sc->sc_funcid == 1)
   2571 			sc->sc_flags &= ~WM_F_WOL;
   2572 		break;
   2573 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2574 		/* if quad port adapter, disable WoL on all but port A */
   2575 		if (sc->sc_funcid != 0)
   2576 			sc->sc_flags &= ~WM_F_WOL;
   2577 		break;
   2578 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2579 		/* Wake events only supported on port A for dual fiber
   2580 		 * regardless of eeprom setting */
   2581 		if (sc->sc_funcid == 1)
   2582 			sc->sc_flags &= ~WM_F_WOL;
   2583 		break;
   2584 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2585 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2586 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2587 		/* if quad port adapter, disable WoL on all but port A */
   2588 		if (sc->sc_funcid != 0)
   2589 			sc->sc_flags &= ~WM_F_WOL;
   2590 		break;
   2591 	}
   2592 
   2593 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2594 		/* Check NVM for autonegotiation */
   2595 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2596 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2597 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2598 		}
   2599 	}
   2600 
   2601 	/*
    2602 	 * XXX Need special handling for some multi-port cards
    2603 	 * to disable a particular port.
   2604 	 */
   2605 
   2606 	if (sc->sc_type >= WM_T_82544) {
   2607 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2608 		if (pn != NULL) {
   2609 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2610 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2611 		} else {
   2612 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2613 				aprint_error_dev(sc->sc_dev,
   2614 				    "unable to read SWDPIN\n");
   2615 				goto out;
   2616 			}
   2617 		}
   2618 	}
   2619 
   2620 	if (cfg1 & NVM_CFG1_ILOS)
   2621 		sc->sc_ctrl |= CTRL_ILOS;
   2622 
   2623 	/*
   2624 	 * XXX
    2625 	 * This code isn't correct because pins 2 and 3 are located at
    2626 	 * different positions on newer chips. Check all the datasheets.
    2627 	 *
    2628 	 * Until this problem is resolved, only handle chips <= 82580 here.
   2629 	 */
   2630 	if (sc->sc_type <= WM_T_82580) {
   2631 		if (sc->sc_type >= WM_T_82544) {
   2632 			sc->sc_ctrl |=
   2633 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2634 			    CTRL_SWDPIO_SHIFT;
   2635 			sc->sc_ctrl |=
   2636 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2637 			    CTRL_SWDPINS_SHIFT;
   2638 		} else {
   2639 			sc->sc_ctrl |=
   2640 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2641 			    CTRL_SWDPIO_SHIFT;
   2642 		}
   2643 	}
   2644 
   2645 	/* XXX For other than 82580? */
   2646 	if (sc->sc_type == WM_T_82580) {
   2647 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2648 		if (nvmword & __BIT(13))
   2649 			sc->sc_ctrl |= CTRL_ILOS;
   2650 	}
   2651 
   2652 #if 0
   2653 	if (sc->sc_type >= WM_T_82544) {
   2654 		if (cfg1 & NVM_CFG1_IPS0)
   2655 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2656 		if (cfg1 & NVM_CFG1_IPS1)
   2657 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2658 		sc->sc_ctrl_ext |=
   2659 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2660 		    CTRL_EXT_SWDPIO_SHIFT;
   2661 		sc->sc_ctrl_ext |=
   2662 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2663 		    CTRL_EXT_SWDPINS_SHIFT;
   2664 	} else {
   2665 		sc->sc_ctrl_ext |=
   2666 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2667 		    CTRL_EXT_SWDPIO_SHIFT;
   2668 	}
   2669 #endif
   2670 
   2671 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2672 #if 0
   2673 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2674 #endif
   2675 
   2676 	if (sc->sc_type == WM_T_PCH) {
   2677 		uint16_t val;
   2678 
   2679 		/* Save the NVM K1 bit setting */
   2680 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2681 
   2682 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2683 			sc->sc_nvm_k1_enabled = 1;
   2684 		else
   2685 			sc->sc_nvm_k1_enabled = 0;
   2686 	}
   2687 
   2688 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2689 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2690 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2691 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2692 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2693 	    || sc->sc_type == WM_T_82573
   2694 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2695 		/* Copper only */
   2696 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2697 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2698 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2699 	    || (sc->sc_type == WM_T_I211)) {
   2700 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2701 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2702 		switch (link_mode) {
   2703 		case CTRL_EXT_LINK_MODE_1000KX:
   2704 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2705 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2706 			break;
   2707 		case CTRL_EXT_LINK_MODE_SGMII:
   2708 			if (wm_sgmii_uses_mdio(sc)) {
   2709 				aprint_verbose_dev(sc->sc_dev,
   2710 				    "SGMII(MDIO)\n");
   2711 				sc->sc_flags |= WM_F_SGMII;
   2712 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2713 				break;
   2714 			}
   2715 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2716 			/*FALLTHROUGH*/
   2717 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2718 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2719 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2720 				if (link_mode
   2721 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2722 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2723 					sc->sc_flags |= WM_F_SGMII;
   2724 				} else {
   2725 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2726 					aprint_verbose_dev(sc->sc_dev,
   2727 					    "SERDES\n");
   2728 				}
   2729 				break;
   2730 			}
   2731 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2732 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2733 
   2734 			/* Change current link mode setting */
   2735 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2736 			switch (sc->sc_mediatype) {
   2737 			case WM_MEDIATYPE_COPPER:
   2738 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2739 				break;
   2740 			case WM_MEDIATYPE_SERDES:
   2741 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2742 				break;
   2743 			default:
   2744 				break;
   2745 			}
   2746 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2747 			break;
   2748 		case CTRL_EXT_LINK_MODE_GMII:
   2749 		default:
   2750 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2751 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2752 			break;
   2753 		}
   2754 
		reg &= ~CTRL_EXT_I2C_ENA;
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
   2760 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2761 	} else if (sc->sc_type < WM_T_82543 ||
   2762 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2763 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2764 			aprint_error_dev(sc->sc_dev,
   2765 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2766 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2767 		}
   2768 	} else {
   2769 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2770 			aprint_error_dev(sc->sc_dev,
   2771 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2772 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2773 		}
   2774 	}
   2775 
   2776 	if (sc->sc_type >= WM_T_PCH2)
   2777 		sc->sc_flags |= WM_F_EEE;
   2778 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2779 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2780 		/* XXX: Need special handling for I354. (not yet) */
   2781 		if (sc->sc_type != WM_T_I354)
   2782 			sc->sc_flags |= WM_F_EEE;
   2783 	}
   2784 
   2785 	/* Set device properties (macflags) */
   2786 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2787 
   2788 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2789 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2790 
   2791 	/* Initialize the media structures accordingly. */
   2792 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2793 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2794 	else
   2795 		wm_tbi_mediainit(sc); /* All others */
   2796 
   2797 	ifp = &sc->sc_ethercom.ec_if;
   2798 	xname = device_xname(sc->sc_dev);
   2799 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2800 	ifp->if_softc = sc;
   2801 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2802 #ifdef WM_MPSAFE
   2803 	ifp->if_extflags = IFEF_MPSAFE;
   2804 #endif
   2805 	ifp->if_ioctl = wm_ioctl;
   2806 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2807 		ifp->if_start = wm_nq_start;
   2808 		/*
		 * When there is only one CPU and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
		 * the other for link status changes.
		 * In this situation, wm_nq_transmit() is disadvantageous
		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2815 		 */
   2816 		if (wm_is_using_multiqueue(sc))
   2817 			ifp->if_transmit = wm_nq_transmit;
   2818 	} else {
   2819 		ifp->if_start = wm_start;
   2820 		/*
		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2822 		 */
   2823 		if (wm_is_using_multiqueue(sc))
   2824 			ifp->if_transmit = wm_transmit;
   2825 	}
	/* wm(4) does not use ifp->if_watchdog; wm_tick works as the watchdog. */
   2827 	ifp->if_init = wm_init;
   2828 	ifp->if_stop = wm_stop;
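	/* The send queue holds max(WM_IFQUEUELEN, IFQ_MAXLEN) packets. */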
   2829 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2830 	IFQ_SET_READY(&ifp->if_snd);
   2831 
   2832 	/* Check for jumbo frame */
   2833 	switch (sc->sc_type) {
   2834 	case WM_T_82573:
   2835 		/* XXX limited to 9234 if ASPM is disabled */
   2836 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2837 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2838 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2839 		break;
   2840 	case WM_T_82571:
   2841 	case WM_T_82572:
   2842 	case WM_T_82574:
   2843 	case WM_T_82583:
   2844 	case WM_T_82575:
   2845 	case WM_T_82576:
   2846 	case WM_T_82580:
   2847 	case WM_T_I350:
   2848 	case WM_T_I354:
   2849 	case WM_T_I210:
   2850 	case WM_T_I211:
   2851 	case WM_T_80003:
   2852 	case WM_T_ICH9:
   2853 	case WM_T_ICH10:
   2854 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2855 	case WM_T_PCH_LPT:
   2856 	case WM_T_PCH_SPT:
   2857 	case WM_T_PCH_CNP:
   2858 		/* XXX limited to 9234 */
   2859 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2860 		break;
   2861 	case WM_T_PCH:
   2862 		/* XXX limited to 4096 */
   2863 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2864 		break;
   2865 	case WM_T_82542_2_0:
   2866 	case WM_T_82542_2_1:
   2867 	case WM_T_ICH8:
   2868 		/* No support for jumbo frame */
   2869 		break;
   2870 	default:
   2871 		/* ETHER_MAX_LEN_JUMBO */
   2872 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2873 		break;
   2874 	}
   2875 
	/* If we're an i82543 or greater, we can support VLANs. */
   2877 	if (sc->sc_type >= WM_T_82543)
   2878 		sc->sc_ethercom.ec_capabilities |=
   2879 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2880 
   2881 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2882 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2883 
   2884 	/*
	 * We can perform TCPv4 and UDPv4 checksums in hardware.  Only
	 * on i82543 and later.
   2887 	 */
   2888 	if (sc->sc_type >= WM_T_82543) {
   2889 		ifp->if_capabilities |=
   2890 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2891 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2892 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2893 		    IFCAP_CSUM_TCPv6_Tx |
   2894 		    IFCAP_CSUM_UDPv6_Tx;
   2895 	}
   2896 
   2897 	/*
	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2899 	 *
   2900 	 *	82541GI (8086:1076) ... no
   2901 	 *	82572EI (8086:10b9) ... yes
   2902 	 */
   2903 	if (sc->sc_type >= WM_T_82571) {
   2904 		ifp->if_capabilities |=
   2905 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2906 	}
   2907 
   2908 	/*
	 * If we're an i82544 or greater (except i82547), we can do
   2910 	 * TCP segmentation offload.
   2911 	 */
   2912 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2913 		ifp->if_capabilities |= IFCAP_TSOv4;
   2914 	}
   2915 
   2916 	if (sc->sc_type >= WM_T_82571) {
   2917 		ifp->if_capabilities |= IFCAP_TSOv6;
   2918 	}
   2919 
   2920 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2921 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2922 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2923 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2924 
   2925 #ifdef WM_MPSAFE
   2926 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2927 #else
   2928 	sc->sc_core_lock = NULL;
   2929 #endif
   2930 
   2931 	/* Attach the interface. */
   2932 	error = if_initialize(ifp);
   2933 	if (error != 0) {
   2934 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2935 		    error);
   2936 		return; /* Error */
   2937 	}
   2938 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2939 	ether_ifattach(ifp, enaddr);
   2940 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2941 	if_register(ifp);
   2942 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2943 	    RND_FLAG_DEFAULT);
   2944 
   2945 #ifdef WM_EVENT_COUNTERS
   2946 	/* Attach event counters. */
   2947 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2948 	    NULL, xname, "linkintr");
   2949 
   2950 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2951 	    NULL, xname, "tx_xoff");
   2952 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2953 	    NULL, xname, "tx_xon");
   2954 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2955 	    NULL, xname, "rx_xoff");
   2956 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2957 	    NULL, xname, "rx_xon");
   2958 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2959 	    NULL, xname, "rx_macctl");
   2960 #endif /* WM_EVENT_COUNTERS */
   2961 
   2962 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2963 		pmf_class_network_register(self, ifp);
   2964 	else
   2965 		aprint_error_dev(self, "couldn't establish power handler\n");
   2966 
   2967 	sc->sc_flags |= WM_F_ATTACHED;
   2968 out:
   2969 	return;
   2970 }
   2971 
   2972 /* The detach function (ca_detach) */
   2973 static int
   2974 wm_detach(device_t self, int flags __unused)
   2975 {
   2976 	struct wm_softc *sc = device_private(self);
   2977 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2978 	int i;
   2979 
   2980 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2981 		return 0;
   2982 
   2983 	/* Stop the interface. Callouts are stopped in it. */
   2984 	wm_stop(ifp, 1);
   2985 
   2986 	pmf_device_deregister(self);
   2987 
   2988 #ifdef WM_EVENT_COUNTERS
   2989 	evcnt_detach(&sc->sc_ev_linkintr);
   2990 
   2991 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2992 	evcnt_detach(&sc->sc_ev_tx_xon);
   2993 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2994 	evcnt_detach(&sc->sc_ev_rx_xon);
   2995 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2996 #endif /* WM_EVENT_COUNTERS */
   2997 
   2998 	rnd_detach_source(&sc->rnd_source);
   2999 
   3000 	/* Tell the firmware about the release */
   3001 	WM_CORE_LOCK(sc);
   3002 	wm_release_manageability(sc);
   3003 	wm_release_hw_control(sc);
   3004 	wm_enable_wakeup(sc);
   3005 	WM_CORE_UNLOCK(sc);
   3006 
   3007 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3008 
   3009 	/* Delete all remaining media. */
   3010 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3011 
   3012 	ether_ifdetach(ifp);
   3013 	if_detach(ifp);
   3014 	if_percpuq_destroy(sc->sc_ipq);
   3015 
   3016 	/* Unload RX dmamaps and free mbufs */
   3017 	for (i = 0; i < sc->sc_nqueues; i++) {
   3018 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3019 		mutex_enter(rxq->rxq_lock);
   3020 		wm_rxdrain(rxq);
   3021 		mutex_exit(rxq->rxq_lock);
   3022 	}
   3023 	/* Must unlock here */
   3024 
   3025 	/* Disestablish the interrupt handler */
   3026 	for (i = 0; i < sc->sc_nintrs; i++) {
   3027 		if (sc->sc_ihs[i] != NULL) {
   3028 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3029 			sc->sc_ihs[i] = NULL;
   3030 		}
   3031 	}
   3032 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3033 
   3034 	wm_free_txrx_queues(sc);
   3035 
   3036 	/* Unmap the registers */
   3037 	if (sc->sc_ss) {
   3038 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3039 		sc->sc_ss = 0;
   3040 	}
   3041 	if (sc->sc_ios) {
   3042 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3043 		sc->sc_ios = 0;
   3044 	}
   3045 	if (sc->sc_flashs) {
   3046 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3047 		sc->sc_flashs = 0;
   3048 	}
   3049 
   3050 	if (sc->sc_core_lock)
   3051 		mutex_obj_free(sc->sc_core_lock);
   3052 	if (sc->sc_ich_phymtx)
   3053 		mutex_obj_free(sc->sc_ich_phymtx);
   3054 	if (sc->sc_ich_nvmmtx)
   3055 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3056 
   3057 	return 0;
   3058 }
   3059 
   3060 static bool
   3061 wm_suspend(device_t self, const pmf_qual_t *qual)
   3062 {
   3063 	struct wm_softc *sc = device_private(self);
   3064 
   3065 	wm_release_manageability(sc);
   3066 	wm_release_hw_control(sc);
   3067 	wm_enable_wakeup(sc);
   3068 
   3069 	return true;
   3070 }
   3071 
   3072 static bool
   3073 wm_resume(device_t self, const pmf_qual_t *qual)
   3074 {
   3075 	struct wm_softc *sc = device_private(self);
   3076 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3077 	pcireg_t reg;
   3078 	char buf[256];
   3079 
   3080 	reg = CSR_READ(sc, WMREG_WUS);
   3081 	if (reg != 0) {
   3082 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3083 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3084 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3085 	}
   3086 
   3087 	if (sc->sc_type >= WM_T_PCH2)
   3088 		wm_resume_workarounds_pchlan(sc);
   3089 	if ((ifp->if_flags & IFF_UP) == 0) {
   3090 		wm_reset(sc);
   3091 		/* Non-AMT based hardware can now take control from firmware */
   3092 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3093 			wm_get_hw_control(sc);
   3094 		wm_init_manageability(sc);
   3095 	} else {
   3096 		/*
		 * We called pmf_class_network_register(), so if_init() is
		 * automatically called when the interface is up (IFF_UP).
		 * wm_reset(), wm_get_hw_control() and wm_init_manageability()
		 * are called via wm_init().
   3101 		 */
   3102 	}
   3103 
   3104 	return true;
   3105 }
   3106 
   3107 /*
   3108  * wm_watchdog:		[ifnet interface function]
   3109  *
   3110  *	Watchdog timer handler.
   3111  */
   3112 static void
   3113 wm_watchdog(struct ifnet *ifp)
   3114 {
   3115 	int qid;
   3116 	struct wm_softc *sc = ifp->if_softc;
	uint16_t hang_queue = 0; /* Max queue count in wm(4) is 16 (82576). */
   3118 
   3119 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3120 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3121 
   3122 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3123 	}
   3124 
   3125 	/*
	 * If any of the queues hung up, reset the interface.
   3127 	 */
   3128 	if (hang_queue != 0) {
   3129 		(void) wm_init(ifp);
   3130 
   3131 		/*
		 * There is still some upper layer processing which calls
		 * ifp->if_start(), e.g. ALTQ or a single CPU system.
   3134 		 */
   3135 		/* Try to get more packets going. */
   3136 		ifp->if_start(ifp);
   3137 	}
   3138 }
   3139 
   3140 
   3141 static void
   3142 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3143 {
   3144 
   3145 	mutex_enter(txq->txq_lock);
   3146 	if (txq->txq_sending &&
   3147 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3148 		wm_watchdog_txq_locked(ifp, txq, hang);
   3149 	}
   3150 	mutex_exit(txq->txq_lock);
   3151 }
   3152 
   3153 static void
   3154 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3155     uint16_t *hang)
   3156 {
   3157 	struct wm_softc *sc = ifp->if_softc;
   3158 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3159 
   3160 	KASSERT(mutex_owned(txq->txq_lock));
   3161 
   3162 	/*
   3163 	 * Since we're using delayed interrupts, sweep up
   3164 	 * before we report an error.
   3165 	 */
   3166 	wm_txeof(txq, UINT_MAX);
   3167 
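	/* If the queue is still marked sending after the sweep, it hung. */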
   3168 	if (txq->txq_sending)
   3169 		*hang |= __BIT(wmq->wmq_id);
   3170 
   3171 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3172 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3173 		    device_xname(sc->sc_dev));
   3174 	} else {
   3175 #ifdef WM_DEBUG
   3176 		int i, j;
   3177 		struct wm_txsoft *txs;
   3178 #endif
   3179 		log(LOG_ERR,
   3180 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3181 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3182 		    txq->txq_next);
   3183 		ifp->if_oerrors++;
   3184 #ifdef WM_DEBUG
   3185 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3186 		    i = WM_NEXTTXS(txq, i)) {
   3187 		    txs = &txq->txq_soft[i];
   3188 		    printf("txs %d tx %d -> %d\n",
   3189 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3190 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3191 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3192 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3193 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3194 				    printf("\t %#08x%08x\n",
   3195 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3196 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3197 			    } else {
   3198 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3199 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3200 					txq->txq_descs[j].wtx_addr.wa_low);
   3201 				    printf("\t %#04x%02x%02x%08x\n",
   3202 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3203 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3204 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3205 					txq->txq_descs[j].wtx_cmdlen);
   3206 			    }
   3207 			if (j == txs->txs_lastdesc)
   3208 				break;
   3209 			}
   3210 		}
   3211 #endif
   3212 	}
   3213 }
   3214 
   3215 /*
   3216  * wm_tick:
   3217  *
   3218  *	One second timer, used to check link status, sweep up
   3219  *	completed transmit jobs, etc.
   3220  */
   3221 static void
   3222 wm_tick(void *arg)
   3223 {
   3224 	struct wm_softc *sc = arg;
   3225 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3226 #ifndef WM_MPSAFE
   3227 	int s = splnet();
   3228 #endif
   3229 
   3230 	WM_CORE_LOCK(sc);
   3231 
   3232 	if (sc->sc_core_stopping) {
   3233 		WM_CORE_UNLOCK(sc);
   3234 #ifndef WM_MPSAFE
   3235 		splx(s);
   3236 #endif
   3237 		return;
   3238 	}
   3239 
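	/* Accumulate flow control stats; the registers are clear-on-read. */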
   3240 	if (sc->sc_type >= WM_T_82542_2_1) {
   3241 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3242 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3243 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3244 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3245 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3246 	}
   3247 
   3248 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3249 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3250 	    + CSR_READ(sc, WMREG_CRCERRS)
   3251 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3252 	    + CSR_READ(sc, WMREG_SYMERRC)
   3253 	    + CSR_READ(sc, WMREG_RXERRC)
   3254 	    + CSR_READ(sc, WMREG_SEC)
   3255 	    + CSR_READ(sc, WMREG_CEXTERR)
   3256 	    + CSR_READ(sc, WMREG_RLEC);
   3257 	/*
	 * WMREG_RNBC is incremented when no buffers are available in host
	 * memory. It is not the number of dropped packets, because the
	 * ethernet controller can still receive packets in that case as
	 * long as there is space in the PHY's FIFO.
	 *
	 * If you want to track WMREG_RNBC, use a dedicated EVCNT instead
	 * of if_iqdrops.
   3265 	 */
   3266 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3267 
   3268 	if (sc->sc_flags & WM_F_HAS_MII)
   3269 		mii_tick(&sc->sc_mii);
   3270 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3271 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3272 		wm_serdes_tick(sc);
   3273 	else
   3274 		wm_tbi_tick(sc);
   3275 
   3276 	WM_CORE_UNLOCK(sc);
   3277 
   3278 	wm_watchdog(ifp);
   3279 
   3280 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3281 }
   3282 
   3283 static int
   3284 wm_ifflags_cb(struct ethercom *ec)
   3285 {
   3286 	struct ifnet *ifp = &ec->ec_if;
   3287 	struct wm_softc *sc = ifp->if_softc;
   3288 	int iffchange, ecchange;
   3289 	bool needreset = false;
   3290 	int rc = 0;
   3291 
   3292 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3293 		device_xname(sc->sc_dev), __func__));
   3294 
   3295 	WM_CORE_LOCK(sc);
   3296 
   3297 	/*
   3298 	 * Check for if_flags.
   3299 	 * Main usage is to prevent linkdown when opening bpf.
   3300 	 */
   3301 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3302 	sc->sc_if_flags = ifp->if_flags;
   3303 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3304 		needreset = true;
   3305 		goto ec;
   3306 	}
   3307 
   3308 	/* iff related updates */
   3309 	if ((iffchange & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3310 		wm_set_filter(sc);
   3311 
   3312 	wm_set_vlan(sc);
   3313 
   3314 ec:
   3315 	/* Check for ec_capenable. */
   3316 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3317 	sc->sc_ec_capenable = ec->ec_capenable;
   3318 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3319 		needreset = true;
   3320 		goto out;
   3321 	}
   3322 
   3323 	/* ec related updates */
   3324 	wm_set_eee(sc);
   3325 
   3326 out:
   3327 	if (needreset)
   3328 		rc = ENETRESET;
   3329 	WM_CORE_UNLOCK(sc);
   3330 
   3331 	return rc;
   3332 }
   3333 
   3334 /*
   3335  * wm_ioctl:		[ifnet interface function]
   3336  *
   3337  *	Handle control requests from the operator.
   3338  */
   3339 static int
   3340 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3341 {
   3342 	struct wm_softc *sc = ifp->if_softc;
   3343 	struct ifreq *ifr = (struct ifreq *) data;
   3344 	struct ifaddr *ifa = (struct ifaddr *)data;
   3345 	struct sockaddr_dl *sdl;
   3346 	int s, error;
   3347 
   3348 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3349 		device_xname(sc->sc_dev), __func__));
   3350 
   3351 #ifndef WM_MPSAFE
   3352 	s = splnet();
   3353 #endif
   3354 	switch (cmd) {
   3355 	case SIOCSIFMEDIA:
   3356 	case SIOCGIFMEDIA:
   3357 		WM_CORE_LOCK(sc);
   3358 		/* Flow control requires full-duplex mode. */
   3359 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3360 		    (ifr->ifr_media & IFM_FDX) == 0)
   3361 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3362 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3363 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3364 				/* We can do both TXPAUSE and RXPAUSE. */
   3365 				ifr->ifr_media |=
   3366 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3367 			}
   3368 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3369 		}
   3370 		WM_CORE_UNLOCK(sc);
   3371 #ifdef WM_MPSAFE
   3372 		s = splnet();
   3373 #endif
   3374 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3375 #ifdef WM_MPSAFE
   3376 		splx(s);
   3377 #endif
   3378 		break;
   3379 	case SIOCINITIFADDR:
   3380 		WM_CORE_LOCK(sc);
   3381 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3382 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3383 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3384 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3385 			/* unicast address is first multicast entry */
   3386 			wm_set_filter(sc);
   3387 			error = 0;
   3388 			WM_CORE_UNLOCK(sc);
   3389 			break;
   3390 		}
   3391 		WM_CORE_UNLOCK(sc);
   3392 		/*FALLTHROUGH*/
   3393 	default:
   3394 #ifdef WM_MPSAFE
   3395 		s = splnet();
   3396 #endif
   3397 		/* It may call wm_start, so unlock here */
   3398 		error = ether_ioctl(ifp, cmd, data);
   3399 #ifdef WM_MPSAFE
   3400 		splx(s);
   3401 #endif
   3402 		if (error != ENETRESET)
   3403 			break;
   3404 
   3405 		error = 0;
   3406 
   3407 		if (cmd == SIOCSIFCAP)
   3408 			error = (*ifp->if_init)(ifp);
   3409 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3410 			;
   3411 		else if (ifp->if_flags & IFF_RUNNING) {
   3412 			/*
   3413 			 * Multicast list has changed; set the hardware filter
   3414 			 * accordingly.
   3415 			 */
   3416 			WM_CORE_LOCK(sc);
   3417 			wm_set_filter(sc);
   3418 			WM_CORE_UNLOCK(sc);
   3419 		}
   3420 		break;
   3421 	}
   3422 
   3423 #ifndef WM_MPSAFE
   3424 	splx(s);
   3425 #endif
   3426 	return error;
   3427 }
   3428 
   3429 /* MAC address related */
   3430 
   3431 /*
 * Get the offset of the MAC address and return it.
 * If an error occurred, use offset 0.
   3434  */
   3435 static uint16_t
   3436 wm_check_alt_mac_addr(struct wm_softc *sc)
   3437 {
   3438 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3439 	uint16_t offset = NVM_OFF_MACADDR;
   3440 
   3441 	/* Try to read alternative MAC address pointer */
   3442 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3443 		return 0;
   3444 
	/* Check whether the pointer is valid. */
   3446 	if ((offset == 0x0000) || (offset == 0xffff))
   3447 		return 0;
   3448 
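	/* Each function's MAC address sits at a fixed offset from it. */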
   3449 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3450 	/*
	 * Check whether the alternative MAC address is valid or not.
	 * Some cards have a non-0xffff pointer but don't actually use
	 * an alternative MAC address.
	 *
	 * The address is used only if its multicast (group) bit is clear.
   3456 	 */
   3457 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3458 		if (((myea[0] & 0xff) & 0x01) == 0)
   3459 			return offset; /* Found */
   3460 
   3461 	/* Not found */
   3462 	return 0;
   3463 }
   3464 
   3465 static int
   3466 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3467 {
   3468 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3469 	uint16_t offset = NVM_OFF_MACADDR;
   3470 	int do_invert = 0;
   3471 
   3472 	switch (sc->sc_type) {
   3473 	case WM_T_82580:
   3474 	case WM_T_I350:
   3475 	case WM_T_I354:
   3476 		/* EEPROM Top Level Partitioning */
   3477 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3478 		break;
   3479 	case WM_T_82571:
   3480 	case WM_T_82575:
   3481 	case WM_T_82576:
   3482 	case WM_T_80003:
   3483 	case WM_T_I210:
   3484 	case WM_T_I211:
   3485 		offset = wm_check_alt_mac_addr(sc);
   3486 		if (offset == 0)
   3487 			if ((sc->sc_funcid & 0x01) == 1)
   3488 				do_invert = 1;
   3489 		break;
   3490 	default:
   3491 		if ((sc->sc_funcid & 0x01) == 1)
   3492 			do_invert = 1;
   3493 		break;
   3494 	}
   3495 
   3496 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3497 		goto bad;
   3498 
   3499 	enaddr[0] = myea[0] & 0xff;
   3500 	enaddr[1] = myea[0] >> 8;
   3501 	enaddr[2] = myea[1] & 0xff;
   3502 	enaddr[3] = myea[1] >> 8;
   3503 	enaddr[4] = myea[2] & 0xff;
   3504 	enaddr[5] = myea[2] >> 8;
   3505 
   3506 	/*
   3507 	 * Toggle the LSB of the MAC address on the second port
   3508 	 * of some dual port cards.
   3509 	 */
   3510 	if (do_invert != 0)
   3511 		enaddr[5] ^= 1;
   3512 
   3513 	return 0;
   3514 
   3515  bad:
   3516 	return -1;
   3517 }
   3518 
   3519 /*
   3520  * wm_set_ral:
   3521  *
 *	Set an entry in the receive address list.
   3523  */
   3524 static void
   3525 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3526 {
   3527 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3528 	uint32_t wlock_mac;
   3529 	int rv;
   3530 
   3531 	if (enaddr != NULL) {
   3532 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3533 		    (enaddr[3] << 24);
   3534 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3535 		ral_hi |= RAL_AV;
   3536 	} else {
   3537 		ral_lo = 0;
   3538 		ral_hi = 0;
   3539 	}
   3540 
   3541 	switch (sc->sc_type) {
   3542 	case WM_T_82542_2_0:
   3543 	case WM_T_82542_2_1:
   3544 	case WM_T_82543:
   3545 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3546 		CSR_WRITE_FLUSH(sc);
   3547 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3548 		CSR_WRITE_FLUSH(sc);
   3549 		break;
   3550 	case WM_T_PCH2:
   3551 	case WM_T_PCH_LPT:
   3552 	case WM_T_PCH_SPT:
   3553 	case WM_T_PCH_CNP:
   3554 		if (idx == 0) {
   3555 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3556 			CSR_WRITE_FLUSH(sc);
   3557 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3558 			CSR_WRITE_FLUSH(sc);
   3559 			return;
   3560 		}
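		/*
		 * Entries other than RAR[0] live in the SHRA registers.
		 * FWSM's WLOCK_MAC field may limit how many of them
		 * software is allowed to write (0 means no limit).
		 */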
   3561 		if (sc->sc_type != WM_T_PCH2) {
   3562 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3563 			    FWSM_WLOCK_MAC);
   3564 			addrl = WMREG_SHRAL(idx - 1);
   3565 			addrh = WMREG_SHRAH(idx - 1);
   3566 		} else {
   3567 			wlock_mac = 0;
   3568 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3569 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3570 		}
   3571 
   3572 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3573 			rv = wm_get_swflag_ich8lan(sc);
   3574 			if (rv != 0)
   3575 				return;
   3576 			CSR_WRITE(sc, addrl, ral_lo);
   3577 			CSR_WRITE_FLUSH(sc);
   3578 			CSR_WRITE(sc, addrh, ral_hi);
   3579 			CSR_WRITE_FLUSH(sc);
   3580 			wm_put_swflag_ich8lan(sc);
   3581 		}
   3582 
   3583 		break;
   3584 	default:
   3585 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3586 		CSR_WRITE_FLUSH(sc);
   3587 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3588 		CSR_WRITE_FLUSH(sc);
   3589 		break;
   3590 	}
   3591 }
   3592 
   3593 /*
   3594  * wm_mchash:
   3595  *
   3596  *	Compute the hash of the multicast address for the 4096-bit
   3597  *	multicast filter.
   3598  */
   3599 static uint32_t
   3600 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3601 {
   3602 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3603 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3604 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3605 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3606 	uint32_t hash;
   3607 
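	/*
	 * The hash is built from the last two octets of the MAC address;
	 * sc_mchash_type selects the shift pair and thus which bits are
	 * used.
	 */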
   3608 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3609 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3610 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3611 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3612 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3613 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3614 		return (hash & 0x3ff);
   3615 	}
   3616 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3617 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3618 
   3619 	return (hash & 0xfff);
   3620 }
   3621 
/*
 * wm_rar_count:
 *
 *	Return the number of entries in the receive address list.
 */
   3626 static int
   3627 wm_rar_count(struct wm_softc *sc)
   3628 {
   3629 	int size;
   3630 
   3631 	switch (sc->sc_type) {
   3632 	case WM_T_ICH8:
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3634 		break;
   3635 	case WM_T_ICH9:
   3636 	case WM_T_ICH10:
   3637 	case WM_T_PCH:
   3638 		size = WM_RAL_TABSIZE_ICH8;
   3639 		break;
   3640 	case WM_T_PCH2:
   3641 		size = WM_RAL_TABSIZE_PCH2;
   3642 		break;
   3643 	case WM_T_PCH_LPT:
   3644 	case WM_T_PCH_SPT:
   3645 	case WM_T_PCH_CNP:
   3646 		size = WM_RAL_TABSIZE_PCH_LPT;
   3647 		break;
   3648 	case WM_T_82575:
   3649 	case WM_T_I210:
   3650 	case WM_T_I211:
   3651 		size = WM_RAL_TABSIZE_82575;
   3652 		break;
   3653 	case WM_T_82576:
   3654 	case WM_T_82580:
   3655 		size = WM_RAL_TABSIZE_82576;
   3656 		break;
   3657 	case WM_T_I350:
   3658 	case WM_T_I354:
   3659 		size = WM_RAL_TABSIZE_I350;
   3660 		break;
   3661 	default:
   3662 		size = WM_RAL_TABSIZE;
   3663 	}
   3664 
   3665 	return size;
   3666 }
   3667 
   3668 /*
   3669  * wm_set_filter:
   3670  *
   3671  *	Set up the receive filter.
   3672  */
   3673 static void
   3674 wm_set_filter(struct wm_softc *sc)
   3675 {
   3676 	struct ethercom *ec = &sc->sc_ethercom;
   3677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3678 	struct ether_multi *enm;
   3679 	struct ether_multistep step;
   3680 	bus_addr_t mta_reg;
   3681 	uint32_t hash, reg, bit;
   3682 	int i, size, ralmax;
   3683 
   3684 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3685 		device_xname(sc->sc_dev), __func__));
   3686 
   3687 	if (sc->sc_type >= WM_T_82544)
   3688 		mta_reg = WMREG_CORDOVA_MTA;
   3689 	else
   3690 		mta_reg = WMREG_MTA;
   3691 
   3692 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3693 
   3694 	if (ifp->if_flags & IFF_BROADCAST)
   3695 		sc->sc_rctl |= RCTL_BAM;
   3696 	if (ifp->if_flags & IFF_PROMISC) {
   3697 		sc->sc_rctl |= RCTL_UPE;
   3698 		goto allmulti;
   3699 	}
   3700 
   3701 	/*
   3702 	 * Set the station address in the first RAL slot, and
   3703 	 * clear the remaining slots.
   3704 	 */
   3705 	size = wm_rar_count(sc);
   3706 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3707 
   3708 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3709 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3710 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3711 		switch (i) {
   3712 		case 0:
   3713 			/* We can use all entries */
   3714 			ralmax = size;
   3715 			break;
   3716 		case 1:
   3717 			/* Only RAR[0] */
   3718 			ralmax = 1;
   3719 			break;
   3720 		default:
   3721 			/* available SHRA + RAR[0] */
   3722 			ralmax = i + 1;
   3723 		}
   3724 	} else
   3725 		ralmax = size;
   3726 	for (i = 1; i < size; i++) {
   3727 		if (i < ralmax)
   3728 			wm_set_ral(sc, NULL, i);
   3729 	}
   3730 
   3731 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3732 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3733 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3734 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3735 		size = WM_ICH8_MC_TABSIZE;
   3736 	else
   3737 		size = WM_MC_TABSIZE;
   3738 	/* Clear out the multicast table. */
   3739 	for (i = 0; i < size; i++) {
   3740 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3741 		CSR_WRITE_FLUSH(sc);
   3742 	}
   3743 
   3744 	ETHER_LOCK(ec);
   3745 	ETHER_FIRST_MULTI(step, ec, enm);
   3746 	while (enm != NULL) {
   3747 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3748 			ETHER_UNLOCK(ec);
   3749 			/*
   3750 			 * We must listen to a range of multicast addresses.
   3751 			 * For now, just accept all multicasts, rather than
   3752 			 * trying to set only those filter bits needed to match
   3753 			 * the range.  (At this time, the only use of address
   3754 			 * ranges is for IP multicast routing, for which the
   3755 			 * range is big enough to require all bits set.)
   3756 			 */
   3757 			goto allmulti;
   3758 		}
   3759 
   3760 		hash = wm_mchash(sc, enm->enm_addrlo);
   3761 
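		/*
		 * The upper bits of the hash select a 32-bit word in the
		 * multicast table; the low 5 bits select the bit within it.
		 */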
   3762 		reg = (hash >> 5);
   3763 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3764 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3765 		    || (sc->sc_type == WM_T_PCH2)
   3766 		    || (sc->sc_type == WM_T_PCH_LPT)
   3767 		    || (sc->sc_type == WM_T_PCH_SPT)
   3768 		    || (sc->sc_type == WM_T_PCH_CNP))
   3769 			reg &= 0x1f;
   3770 		else
   3771 			reg &= 0x7f;
   3772 		bit = hash & 0x1f;
   3773 
   3774 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3775 		hash |= 1U << bit;
   3776 
   3777 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3778 			/*
   3779 			 * 82544 Errata 9: Certain register cannot be written
   3780 			 * with particular alignments in PCI-X bus operation
   3781 			 * (FCAH, MTA and VFTA).
   3782 			 */
   3783 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3784 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3785 			CSR_WRITE_FLUSH(sc);
   3786 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3787 			CSR_WRITE_FLUSH(sc);
   3788 		} else {
   3789 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3790 			CSR_WRITE_FLUSH(sc);
   3791 		}
   3792 
   3793 		ETHER_NEXT_MULTI(step, enm);
   3794 	}
   3795 	ETHER_UNLOCK(ec);
   3796 
   3797 	ifp->if_flags &= ~IFF_ALLMULTI;
   3798 	goto setit;
   3799 
   3800  allmulti:
   3801 	ifp->if_flags |= IFF_ALLMULTI;
   3802 	sc->sc_rctl |= RCTL_MPE;
   3803 
   3804  setit:
   3805 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3806 }
   3807 
   3808 /* Reset and init related */
   3809 
   3810 static void
   3811 wm_set_vlan(struct wm_softc *sc)
   3812 {
   3813 
   3814 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3815 		device_xname(sc->sc_dev), __func__));
   3816 
   3817 	/* Deal with VLAN enables. */
   3818 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3819 		sc->sc_ctrl |= CTRL_VME;
   3820 	else
   3821 		sc->sc_ctrl &= ~CTRL_VME;
   3822 
   3823 	/* Write the control registers. */
   3824 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3825 }
   3826 
   3827 static void
   3828 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3829 {
   3830 	uint32_t gcr;
   3831 	pcireg_t ctrl2;
   3832 
   3833 	gcr = CSR_READ(sc, WMREG_GCR);
   3834 
	/* Only take action if the timeout value is still at its default of 0 */
   3836 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3837 		goto out;
   3838 
   3839 	if ((gcr & GCR_CAP_VER2) == 0) {
   3840 		gcr |= GCR_CMPL_TMOUT_10MS;
   3841 		goto out;
   3842 	}
   3843 
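	/*
	 * For capability version 2 devices, set a 16ms completion timeout
	 * via the PCIe Device Control 2 register in config space.
	 */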
   3844 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3845 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3846 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3847 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3848 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3849 
   3850 out:
   3851 	/* Disable completion timeout resend */
   3852 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3853 
   3854 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3855 }
   3856 
   3857 void
   3858 wm_get_auto_rd_done(struct wm_softc *sc)
   3859 {
   3860 	int i;
   3861 
	/* Wait for eeprom to reload */
   3863 	switch (sc->sc_type) {
   3864 	case WM_T_82571:
   3865 	case WM_T_82572:
   3866 	case WM_T_82573:
   3867 	case WM_T_82574:
   3868 	case WM_T_82583:
   3869 	case WM_T_82575:
   3870 	case WM_T_82576:
   3871 	case WM_T_82580:
   3872 	case WM_T_I350:
   3873 	case WM_T_I354:
   3874 	case WM_T_I210:
   3875 	case WM_T_I211:
   3876 	case WM_T_80003:
   3877 	case WM_T_ICH8:
   3878 	case WM_T_ICH9:
   3879 		for (i = 0; i < 10; i++) {
   3880 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3881 				break;
   3882 			delay(1000);
   3883 		}
   3884 		if (i == 10) {
   3885 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3886 			    "complete\n", device_xname(sc->sc_dev));
   3887 		}
   3888 		break;
   3889 	default:
   3890 		break;
   3891 	}
   3892 }
   3893 
   3894 void
   3895 wm_lan_init_done(struct wm_softc *sc)
   3896 {
   3897 	uint32_t reg = 0;
   3898 	int i;
   3899 
   3900 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3901 		device_xname(sc->sc_dev), __func__));
   3902 
   3903 	/* Wait for eeprom to reload */
   3904 	switch (sc->sc_type) {
   3905 	case WM_T_ICH10:
   3906 	case WM_T_PCH:
   3907 	case WM_T_PCH2:
   3908 	case WM_T_PCH_LPT:
   3909 	case WM_T_PCH_SPT:
   3910 	case WM_T_PCH_CNP:
   3911 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3912 			reg = CSR_READ(sc, WMREG_STATUS);
   3913 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3914 				break;
   3915 			delay(100);
   3916 		}
   3917 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3918 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3919 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3920 		}
   3921 		break;
   3922 	default:
   3923 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3924 		    __func__);
   3925 		break;
   3926 	}
   3927 
   3928 	reg &= ~STATUS_LAN_INIT_DONE;
   3929 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3930 }
   3931 
   3932 void
   3933 wm_get_cfg_done(struct wm_softc *sc)
   3934 {
   3935 	int mask;
   3936 	uint32_t reg;
   3937 	int i;
   3938 
   3939 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3940 		device_xname(sc->sc_dev), __func__));
   3941 
   3942 	/* Wait for eeprom to reload */
   3943 	switch (sc->sc_type) {
   3944 	case WM_T_82542_2_0:
   3945 	case WM_T_82542_2_1:
   3946 		/* null */
   3947 		break;
   3948 	case WM_T_82543:
   3949 	case WM_T_82544:
   3950 	case WM_T_82540:
   3951 	case WM_T_82545:
   3952 	case WM_T_82545_3:
   3953 	case WM_T_82546:
   3954 	case WM_T_82546_3:
   3955 	case WM_T_82541:
   3956 	case WM_T_82541_2:
   3957 	case WM_T_82547:
   3958 	case WM_T_82547_2:
   3959 	case WM_T_82573:
   3960 	case WM_T_82574:
   3961 	case WM_T_82583:
   3962 		/* generic */
   3963 		delay(10*1000);
   3964 		break;
   3965 	case WM_T_80003:
   3966 	case WM_T_82571:
   3967 	case WM_T_82572:
   3968 	case WM_T_82575:
   3969 	case WM_T_82576:
   3970 	case WM_T_82580:
   3971 	case WM_T_I350:
   3972 	case WM_T_I354:
   3973 	case WM_T_I210:
   3974 	case WM_T_I211:
   3975 		if (sc->sc_type == WM_T_82571) {
   3976 			/* Only 82571 shares port 0 */
   3977 			mask = EEMNGCTL_CFGDONE_0;
   3978 		} else
   3979 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3980 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3981 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3982 				break;
   3983 			delay(1000);
   3984 		}
   3985 		if (i >= WM_PHY_CFG_TIMEOUT)
   3986 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3987 				device_xname(sc->sc_dev), __func__));
   3988 		break;
   3989 	case WM_T_ICH8:
   3990 	case WM_T_ICH9:
   3991 	case WM_T_ICH10:
   3992 	case WM_T_PCH:
   3993 	case WM_T_PCH2:
   3994 	case WM_T_PCH_LPT:
   3995 	case WM_T_PCH_SPT:
   3996 	case WM_T_PCH_CNP:
   3997 		delay(10*1000);
   3998 		if (sc->sc_type >= WM_T_ICH10)
   3999 			wm_lan_init_done(sc);
   4000 		else
   4001 			wm_get_auto_rd_done(sc);
   4002 
   4003 		/* Clear PHY Reset Asserted bit */
   4004 		reg = CSR_READ(sc, WMREG_STATUS);
   4005 		if ((reg & STATUS_PHYRA) != 0)
   4006 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4007 		break;
   4008 	default:
   4009 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4010 		    __func__);
   4011 		break;
   4012 	}
   4013 }
   4014 
   4015 int
   4016 wm_phy_post_reset(struct wm_softc *sc)
   4017 {
   4018 	device_t dev = sc->sc_dev;
   4019 	uint16_t reg;
   4020 	int rv = 0;
   4021 
   4022 	/* This function is only for ICH8 and newer. */
   4023 	if (sc->sc_type < WM_T_ICH8)
   4024 		return 0;
   4025 
   4026 	if (wm_phy_resetisblocked(sc)) {
   4027 		/* XXX */
   4028 		device_printf(dev, "PHY is blocked\n");
   4029 		return -1;
   4030 	}
   4031 
   4032 	/* Allow time for h/w to get to quiescent state after reset */
   4033 	delay(10*1000);
   4034 
   4035 	/* Perform any necessary post-reset workarounds */
   4036 	if (sc->sc_type == WM_T_PCH)
   4037 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4038 	else if (sc->sc_type == WM_T_PCH2)
   4039 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4040 	if (rv != 0)
   4041 		return rv;
   4042 
   4043 	/* Clear the host wakeup bit after lcd reset */
   4044 	if (sc->sc_type >= WM_T_PCH) {
   4045 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4046 		reg &= ~BM_WUC_HOST_WU_BIT;
   4047 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4048 	}
   4049 
   4050 	/* Configure the LCD with the extended configuration region in NVM */
   4051 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4052 		return rv;
   4053 
   4054 	/* Configure the LCD with the OEM bits in NVM */
   4055 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4056 
   4057 	if (sc->sc_type == WM_T_PCH2) {
   4058 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4059 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4060 			delay(10 * 1000);
   4061 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4062 		}
   4063 		/* Set EEE LPI Update Timer to 200usec */
   4064 		rv = sc->phy.acquire(sc);
   4065 		if (rv)
   4066 			return rv;
   4067 		rv = wm_write_emi_reg_locked(dev,
   4068 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4069 		sc->phy.release(sc);
   4070 	}
   4071 
   4072 	return rv;
   4073 }
   4074 
   4075 /* Only for PCH and newer */
   4076 static int
   4077 wm_write_smbus_addr(struct wm_softc *sc)
   4078 {
   4079 	uint32_t strap, freq;
   4080 	uint16_t phy_data;
   4081 	int rv;
   4082 
   4083 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4084 		device_xname(sc->sc_dev), __func__));
   4085 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4086 
   4087 	strap = CSR_READ(sc, WMREG_STRAP);
   4088 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4089 
   4090 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4091 	if (rv != 0)
   4092 		return -1;
   4093 
   4094 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4095 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4096 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4097 
   4098 	if (sc->sc_phytype == WMPHY_I217) {
   4099 		/* Restore SMBus frequency */
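		/* freq == 0 means an unsupported strapped frequency. */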
		if (freq--) {
   4101 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4102 			    | HV_SMB_ADDR_FREQ_HIGH);
   4103 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4104 			    HV_SMB_ADDR_FREQ_LOW);
   4105 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4106 			    HV_SMB_ADDR_FREQ_HIGH);
   4107 		} else
   4108 			DPRINTF(WM_DEBUG_INIT,
   4109 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4110 				device_xname(sc->sc_dev), __func__));
   4111 	}
   4112 
   4113 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4114 	    phy_data);
   4115 }
   4116 
   4117 static int
   4118 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4119 {
   4120 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4121 	uint16_t phy_page = 0;
   4122 	int rv = 0;
   4123 
   4124 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4125 		device_xname(sc->sc_dev), __func__));
   4126 
   4127 	switch (sc->sc_type) {
   4128 	case WM_T_ICH8:
   4129 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4130 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4131 			return 0;
   4132 
   4133 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4134 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4135 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4136 			break;
   4137 		}
   4138 		/* FALLTHROUGH */
   4139 	case WM_T_PCH:
   4140 	case WM_T_PCH2:
   4141 	case WM_T_PCH_LPT:
   4142 	case WM_T_PCH_SPT:
   4143 	case WM_T_PCH_CNP:
   4144 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4145 		break;
   4146 	default:
   4147 		return 0;
   4148 	}
   4149 
   4150 	if ((rv = sc->phy.acquire(sc)) != 0)
   4151 		return rv;
   4152 
   4153 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4154 	if ((reg & sw_cfg_mask) == 0)
   4155 		goto release;
   4156 
   4157 	/*
   4158 	 * Make sure HW does not configure LCD from PHY extended configuration
   4159 	 * before SW configuration
   4160 	 */
   4161 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4162 	if ((sc->sc_type < WM_T_PCH2)
   4163 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4164 		goto release;
   4165 
   4166 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4167 		device_xname(sc->sc_dev), __func__));
   4168 	/* word_addr is in DWORD */
   4169 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4170 
   4171 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4172 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4173 	if (cnf_size == 0)
   4174 		goto release;
   4175 
   4176 	if (((sc->sc_type == WM_T_PCH)
   4177 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4178 	    || (sc->sc_type > WM_T_PCH)) {
   4179 		/*
   4180 		 * HW configures the SMBus address and LEDs when the OEM and
   4181 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4182 		 * are cleared, SW will configure them instead.
   4183 		 */
   4184 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4185 			device_xname(sc->sc_dev), __func__));
   4186 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4187 			goto release;
   4188 
   4189 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4190 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4191 		    (uint16_t)reg);
   4192 		if (rv != 0)
   4193 			goto release;
   4194 	}
   4195 
   4196 	/* Configure LCD from extended configuration region. */
   4197 	for (i = 0; i < cnf_size; i++) {
   4198 		uint16_t reg_data, reg_addr;
   4199 
   4200 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4201 			goto release;
   4202 
		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4204 			goto release;
   4205 
   4206 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4207 			phy_page = reg_data;
   4208 
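		/* Fold the in-page register offset into the selected page. */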
   4209 		reg_addr &= IGPHY_MAXREGADDR;
   4210 		reg_addr |= phy_page;
   4211 
   4212 		KASSERT(sc->phy.writereg_locked != NULL);
   4213 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4214 		    reg_data);
   4215 	}
   4216 
   4217 release:
   4218 	sc->phy.release(sc);
   4219 	return rv;
   4220 }
   4221 
   4222 /*
   4223  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4224  *  @sc:       pointer to the HW structure
   4225  *  @d0_state: boolean if entering d0 or d3 device state
   4226  *
   4227  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4228  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4229  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4230  */
   4231 int
   4232 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4233 {
   4234 	uint32_t mac_reg;
   4235 	uint16_t oem_reg;
   4236 	int rv;
   4237 
   4238 	if (sc->sc_type < WM_T_PCH)
   4239 		return 0;
   4240 
   4241 	rv = sc->phy.acquire(sc);
   4242 	if (rv != 0)
   4243 		return rv;
   4244 
   4245 	if (sc->sc_type == WM_T_PCH) {
   4246 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4247 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4248 			goto release;
   4249 	}
   4250 
   4251 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4252 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4253 		goto release;
   4254 
   4255 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4256 
   4257 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4258 	if (rv != 0)
   4259 		goto release;
   4260 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4261 
   4262 	if (d0_state) {
   4263 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4264 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4265 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4266 			oem_reg |= HV_OEM_BITS_LPLU;
   4267 	} else {
   4268 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4269 		    != 0)
   4270 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4271 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4272 		    != 0)
   4273 			oem_reg |= HV_OEM_BITS_LPLU;
   4274 	}
   4275 
   4276 	/* Set Restart auto-neg to activate the bits */
   4277 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4278 	    && (wm_phy_resetisblocked(sc) == false))
   4279 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4280 
   4281 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4282 
   4283 release:
   4284 	sc->phy.release(sc);
   4285 
   4286 	return rv;
   4287 }
   4288 
   4289 /* Init hardware bits */
   4290 void
   4291 wm_initialize_hardware_bits(struct wm_softc *sc)
   4292 {
   4293 	uint32_t tarc0, tarc1, reg;
   4294 
   4295 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4296 		device_xname(sc->sc_dev), __func__));
   4297 
   4298 	/* For 82571 variant, 80003 and ICHs */
   4299 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4300 	    || (sc->sc_type >= WM_T_80003)) {
   4301 
   4302 		/* Transmit Descriptor Control 0 */
   4303 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4304 		reg |= TXDCTL_COUNT_DESC;
   4305 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4306 
   4307 		/* Transmit Descriptor Control 1 */
   4308 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4309 		reg |= TXDCTL_COUNT_DESC;
   4310 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4311 
   4312 		/* TARC0 */
   4313 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4314 		switch (sc->sc_type) {
   4315 		case WM_T_82571:
   4316 		case WM_T_82572:
   4317 		case WM_T_82573:
   4318 		case WM_T_82574:
   4319 		case WM_T_82583:
   4320 		case WM_T_80003:
   4321 			/* Clear bits 30..27 */
   4322 			tarc0 &= ~__BITS(30, 27);
   4323 			break;
   4324 		default:
   4325 			break;
   4326 		}
   4327 
   4328 		switch (sc->sc_type) {
   4329 		case WM_T_82571:
   4330 		case WM_T_82572:
   4331 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4332 
   4333 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4334 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4335 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4336 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4338 
   4339 			/* TARC1 bit 28 */
   4340 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4341 				tarc1 &= ~__BIT(28);
   4342 			else
   4343 				tarc1 |= __BIT(28);
   4344 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4345 
   4346 			/*
   4347 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   4349 			 */
   4350 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4351 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4352 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4353 			break;
   4354 		case WM_T_82573:
   4355 		case WM_T_82574:
   4356 		case WM_T_82583:
   4357 			if ((sc->sc_type == WM_T_82574)
   4358 			    || (sc->sc_type == WM_T_82583))
   4359 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4360 
   4361 			/* Extended Device Control */
   4362 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4363 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4364 			reg |= __BIT(22);	/* Set bit 22 */
   4365 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4366 
   4367 			/* Device Control */
   4368 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4369 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4370 
   4371 			/* PCIe Control Register */
   4372 			/*
   4373 			 * 82573 Errata (unknown).
   4374 			 *
   4375 			 * 82574 Errata 25 and 82583 Errata 12
   4376 			 * "Dropped Rx Packets":
			 *   NVM Image Version 2.1.4 and newer doesn't have this bug.
   4378 			 */
   4379 			reg = CSR_READ(sc, WMREG_GCR);
   4380 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4381 			CSR_WRITE(sc, WMREG_GCR, reg);
   4382 
   4383 			if ((sc->sc_type == WM_T_82574)
   4384 			    || (sc->sc_type == WM_T_82583)) {
   4385 				/*
   4386 				 * Document says this bit must be set for
   4387 				 * proper operation.
   4388 				 */
   4389 				reg = CSR_READ(sc, WMREG_GCR);
   4390 				reg |= __BIT(22);
   4391 				CSR_WRITE(sc, WMREG_GCR, reg);
   4392 
   4393 				/*
				 * Apply a workaround for a hardware erratum
				 * documented in the errata docs. It fixes an
				 * issue where error prone or unreliable PCIe
				 * completions occur, particularly with ASPM
				 * enabled. Without the fix, the issue can
				 * cause Tx timeouts.
   4400 				 */
   4401 				reg = CSR_READ(sc, WMREG_GCR2);
   4402 				reg |= __BIT(0);
   4403 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4404 			}
   4405 			break;
   4406 		case WM_T_80003:
   4407 			/* TARC0 */
   4408 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4409 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4411 
   4412 			/* TARC1 bit 28 */
   4413 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4414 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4415 				tarc1 &= ~__BIT(28);
   4416 			else
   4417 				tarc1 |= __BIT(28);
   4418 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4419 			break;
   4420 		case WM_T_ICH8:
   4421 		case WM_T_ICH9:
   4422 		case WM_T_ICH10:
   4423 		case WM_T_PCH:
   4424 		case WM_T_PCH2:
   4425 		case WM_T_PCH_LPT:
   4426 		case WM_T_PCH_SPT:
   4427 		case WM_T_PCH_CNP:
   4428 			/* TARC0 */
   4429 			if (sc->sc_type == WM_T_ICH8) {
   4430 				/* Set TARC0 bits 29 and 28 */
   4431 				tarc0 |= __BITS(29, 28);
   4432 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4433 				tarc0 |= __BIT(29);
   4434 				/*
				 * Drop bit 28. From Linux.
   4436 				 * See I218/I219 spec update
   4437 				 * "5. Buffer Overrun While the I219 is
   4438 				 * Processing DMA Transactions"
   4439 				 */
   4440 				tarc0 &= ~__BIT(28);
   4441 			}
   4442 			/* Set TARC0 bits 23,24,26,27 */
   4443 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4444 
   4445 			/* CTRL_EXT */
   4446 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4447 			reg |= __BIT(22);	/* Set bit 22 */
   4448 			/*
   4449 			 * Enable PHY low-power state when MAC is at D3
   4450 			 * w/o WoL
   4451 			 */
   4452 			if (sc->sc_type >= WM_T_PCH)
   4453 				reg |= CTRL_EXT_PHYPDEN;
   4454 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4455 
   4456 			/* TARC1 */
   4457 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4458 			/* bit 28 */
   4459 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4460 				tarc1 &= ~__BIT(28);
   4461 			else
   4462 				tarc1 |= __BIT(28);
   4463 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4464 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4465 
   4466 			/* Device Status */
   4467 			if (sc->sc_type == WM_T_ICH8) {
   4468 				reg = CSR_READ(sc, WMREG_STATUS);
   4469 				reg &= ~__BIT(31);
   4470 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4471 
   4472 			}
   4473 
   4474 			/* IOSFPC */
   4475 			if (sc->sc_type == WM_T_PCH_SPT) {
   4476 				reg = CSR_READ(sc, WMREG_IOSFPC);
				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4478 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4479 			}
   4480 			/*
   4481 			 * Work-around descriptor data corruption issue during
   4482 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4483 			 * capability.
   4484 			 */
   4485 			reg = CSR_READ(sc, WMREG_RFCTL);
   4486 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4487 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4488 			break;
   4489 		default:
   4490 			break;
   4491 		}
   4492 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4493 
   4494 		switch (sc->sc_type) {
   4495 		/*
   4496 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4497 		 * Avoid RSS Hash Value bug.
   4498 		 */
   4499 		case WM_T_82571:
   4500 		case WM_T_82572:
   4501 		case WM_T_82573:
   4502 		case WM_T_80003:
   4503 		case WM_T_ICH8:
   4504 			reg = CSR_READ(sc, WMREG_RFCTL);
   4505 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4506 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4507 			break;
   4508 		case WM_T_82574:
    4509 			/* Use the extended Rx descriptor. */
   4510 			reg = CSR_READ(sc, WMREG_RFCTL);
   4511 			reg |= WMREG_RFCTL_EXSTEN;
   4512 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4513 			break;
   4514 		default:
   4515 			break;
   4516 		}
   4517 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4518 		/*
   4519 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4520 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4521 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4522 		 * Correctly by the Device"
   4523 		 *
   4524 		 * I354(C2000) Errata AVR53:
   4525 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4526 		 * Hang"
   4527 		 */
   4528 		reg = CSR_READ(sc, WMREG_RFCTL);
   4529 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4530 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4531 	}
   4532 }
   4533 
   4534 static uint32_t
   4535 wm_rxpbs_adjust_82580(uint32_t val)
   4536 {
   4537 	uint32_t rv = 0;
   4538 
   4539 	if (val < __arraycount(wm_82580_rxpbs_table))
   4540 		rv = wm_82580_rxpbs_table[val];
   4541 
   4542 	return rv;
   4543 }
   4544 
   4545 /*
   4546  * wm_reset_phy:
   4547  *
   4548  *	generic PHY reset function.
   4549  *	Same as e1000_phy_hw_reset_generic()
   4550  */
   4551 static int
   4552 wm_reset_phy(struct wm_softc *sc)
   4553 {
   4554 	uint32_t reg;
   4555 
   4556 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4557 		device_xname(sc->sc_dev), __func__));
   4558 	if (wm_phy_resetisblocked(sc))
   4559 		return -1;
   4560 
   4561 	sc->phy.acquire(sc);
   4562 
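         	/*
         	 * Assert the PHY reset, hold it for the PHY-specific delay,
         	 * then deassert it and give the PHY 150us to settle before
         	 * releasing the semaphore.
         	 */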
   4563 	reg = CSR_READ(sc, WMREG_CTRL);
   4564 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4565 	CSR_WRITE_FLUSH(sc);
   4566 
   4567 	delay(sc->phy.reset_delay_us);
   4568 
   4569 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4570 	CSR_WRITE_FLUSH(sc);
   4571 
   4572 	delay(150);
   4573 
   4574 	sc->phy.release(sc);
   4575 
   4576 	wm_get_cfg_done(sc);
   4577 	wm_phy_post_reset(sc);
   4578 
   4579 	return 0;
   4580 }
   4581 
   4582 /*
   4583  * Only used by WM_T_PCH_SPT which does not use multiqueue,
    4584  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
    4585  * so it is enough to check sc->sc_queue[0].
   4586 static void
   4587 wm_flush_desc_rings(struct wm_softc *sc)
   4588 {
   4589 	pcireg_t preg;
   4590 	uint32_t reg;
   4591 	struct wm_txqueue *txq;
   4592 	wiseman_txdesc_t *txd;
   4593 	int nexttx;
   4594 	uint32_t rctl;
   4595 
   4596 	/* First, disable MULR fix in FEXTNVM11 */
   4597 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4598 	reg |= FEXTNVM11_DIS_MULRFIX;
   4599 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4600 
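         	/*
         	 * Nothing to do unless the hardware has flagged a descriptor
         	 * flush request and the Tx ring has actually been configured
         	 * (TDLEN(0) != 0).
         	 */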
   4601 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4602 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4603 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4604 		return;
   4605 
   4606 	/* TX */
   4607 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4608 	    device_xname(sc->sc_dev), preg, reg);
   4609 	reg = CSR_READ(sc, WMREG_TCTL);
   4610 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4611 
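         	/*
         	 * Queue a single 512-byte dummy descriptor and advance the
         	 * tail pointer so the hardware can complete the transaction
         	 * flagged in DESCRING_STATUS.
         	 */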
   4612 	txq = &sc->sc_queue[0].wmq_txq;
   4613 	nexttx = txq->txq_next;
   4614 	txd = &txq->txq_descs[nexttx];
   4615 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4616 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4617 	txd->wtx_fields.wtxu_status = 0;
   4618 	txd->wtx_fields.wtxu_options = 0;
   4619 	txd->wtx_fields.wtxu_vlan = 0;
   4620 
   4621 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4622 	    BUS_SPACE_BARRIER_WRITE);
   4623 
   4624 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4625 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4626 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4627 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4628 	delay(250);
   4629 
   4630 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4631 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4632 		return;
   4633 
   4634 	/* RX */
   4635 	printf("%s: Need RX flush (reg = %08x)\n",
   4636 	    device_xname(sc->sc_dev), preg);
   4637 	rctl = CSR_READ(sc, WMREG_RCTL);
   4638 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4639 	CSR_WRITE_FLUSH(sc);
   4640 	delay(150);
   4641 
   4642 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4643 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4644 	reg &= 0xffffc000;
   4645 	/*
    4646 	 * Update the thresholds: prefetch threshold to 31, host threshold
    4647 	 * to 1, and make sure the granularity is "descriptors" and not
    4648 	 * "cache lines".
   4649 	 */
   4650 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4651 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4652 
   4653 	/*
    4654 	 * Momentarily enable the RX ring for the changes to take
    4655 	 * effect.
   4656 	 */
   4657 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4658 	CSR_WRITE_FLUSH(sc);
   4659 	delay(150);
   4660 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4661 }
   4662 
   4663 /*
   4664  * wm_reset:
   4665  *
   4666  *	Reset the i82542 chip.
   4667  */
   4668 static void
   4669 wm_reset(struct wm_softc *sc)
   4670 {
   4671 	int phy_reset = 0;
   4672 	int i, error = 0;
   4673 	uint32_t reg;
   4674 	uint16_t kmreg;
   4675 	int rv;
   4676 
   4677 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4678 		device_xname(sc->sc_dev), __func__));
   4679 	KASSERT(sc->sc_type != 0);
   4680 
   4681 	/*
   4682 	 * Allocate on-chip memory according to the MTU size.
   4683 	 * The Packet Buffer Allocation register must be written
   4684 	 * before the chip is reset.
   4685 	 */
   4686 	switch (sc->sc_type) {
   4687 	case WM_T_82547:
   4688 	case WM_T_82547_2:
   4689 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4690 		    PBA_22K : PBA_30K;
   4691 		for (i = 0; i < sc->sc_nqueues; i++) {
   4692 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4693 			txq->txq_fifo_head = 0;
   4694 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4695 			txq->txq_fifo_size =
   4696 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4697 			txq->txq_fifo_stall = 0;
   4698 		}
   4699 		break;
   4700 	case WM_T_82571:
   4701 	case WM_T_82572:
    4702 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4703 	case WM_T_80003:
   4704 		sc->sc_pba = PBA_32K;
   4705 		break;
   4706 	case WM_T_82573:
   4707 		sc->sc_pba = PBA_12K;
   4708 		break;
   4709 	case WM_T_82574:
   4710 	case WM_T_82583:
   4711 		sc->sc_pba = PBA_20K;
   4712 		break;
   4713 	case WM_T_82576:
   4714 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4715 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4716 		break;
   4717 	case WM_T_82580:
   4718 	case WM_T_I350:
   4719 	case WM_T_I354:
   4720 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4721 		break;
   4722 	case WM_T_I210:
   4723 	case WM_T_I211:
   4724 		sc->sc_pba = PBA_34K;
   4725 		break;
   4726 	case WM_T_ICH8:
   4727 		/* Workaround for a bit corruption issue in FIFO memory */
   4728 		sc->sc_pba = PBA_8K;
   4729 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4730 		break;
   4731 	case WM_T_ICH9:
   4732 	case WM_T_ICH10:
   4733 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4734 		    PBA_14K : PBA_10K;
   4735 		break;
   4736 	case WM_T_PCH:
   4737 	case WM_T_PCH2:	/* XXX 14K? */
   4738 	case WM_T_PCH_LPT:
   4739 	case WM_T_PCH_SPT:
   4740 	case WM_T_PCH_CNP:
   4741 		sc->sc_pba = PBA_26K;
   4742 		break;
   4743 	default:
   4744 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4745 		    PBA_40K : PBA_48K;
   4746 		break;
   4747 	}
   4748 	/*
   4749 	 * Only old or non-multiqueue devices have the PBA register
   4750 	 * XXX Need special handling for 82575.
   4751 	 */
   4752 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4753 	    || (sc->sc_type == WM_T_82575))
   4754 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4755 
   4756 	/* Prevent the PCI-E bus from sticking */
   4757 	if (sc->sc_flags & WM_F_PCIE) {
   4758 		int timeout = 800;
   4759 
   4760 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4761 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4762 
   4763 		while (timeout--) {
   4764 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4765 			    == 0)
   4766 				break;
   4767 			delay(100);
   4768 		}
   4769 		if (timeout == 0)
   4770 			device_printf(sc->sc_dev,
   4771 			    "failed to disable busmastering\n");
   4772 	}
   4773 
   4774 	/* Set the completion timeout for interface */
   4775 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4776 	    || (sc->sc_type == WM_T_82580)
   4777 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4778 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4779 		wm_set_pcie_completion_timeout(sc);
   4780 
   4781 	/* Clear interrupt */
   4782 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4783 	if (wm_is_using_msix(sc)) {
   4784 		if (sc->sc_type != WM_T_82574) {
   4785 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4786 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4787 		} else
   4788 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4789 	}
   4790 
   4791 	/* Stop the transmit and receive processes. */
   4792 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4793 	sc->sc_rctl &= ~RCTL_EN;
   4794 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4795 	CSR_WRITE_FLUSH(sc);
   4796 
   4797 	/* XXX set_tbi_sbp_82543() */
   4798 
   4799 	delay(10*1000);
   4800 
   4801 	/* Must acquire the MDIO ownership before MAC reset */
   4802 	switch (sc->sc_type) {
   4803 	case WM_T_82573:
   4804 	case WM_T_82574:
   4805 	case WM_T_82583:
   4806 		error = wm_get_hw_semaphore_82573(sc);
   4807 		break;
   4808 	default:
   4809 		break;
   4810 	}
   4811 
   4812 	/*
   4813 	 * 82541 Errata 29? & 82547 Errata 28?
   4814 	 * See also the description about PHY_RST bit in CTRL register
   4815 	 * in 8254x_GBe_SDM.pdf.
   4816 	 */
   4817 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4818 		CSR_WRITE(sc, WMREG_CTRL,
   4819 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4820 		CSR_WRITE_FLUSH(sc);
   4821 		delay(5000);
   4822 	}
   4823 
   4824 	switch (sc->sc_type) {
   4825 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4826 	case WM_T_82541:
   4827 	case WM_T_82541_2:
   4828 	case WM_T_82547:
   4829 	case WM_T_82547_2:
   4830 		/*
   4831 		 * On some chipsets, a reset through a memory-mapped write
   4832 		 * cycle can cause the chip to reset before completing the
    4833 		 * write cycle. This causes major headaches that can be avoided
   4834 		 * by issuing the reset via indirect register writes through
   4835 		 * I/O space.
   4836 		 *
   4837 		 * So, if we successfully mapped the I/O BAR at attach time,
   4838 		 * use that. Otherwise, try our luck with a memory-mapped
   4839 		 * reset.
   4840 		 */
   4841 		if (sc->sc_flags & WM_F_IOH_VALID)
   4842 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4843 		else
   4844 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4845 		break;
   4846 	case WM_T_82545_3:
   4847 	case WM_T_82546_3:
   4848 		/* Use the shadow control register on these chips. */
   4849 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4850 		break;
   4851 	case WM_T_80003:
   4852 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4853 		sc->phy.acquire(sc);
   4854 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4855 		sc->phy.release(sc);
   4856 		break;
   4857 	case WM_T_ICH8:
   4858 	case WM_T_ICH9:
   4859 	case WM_T_ICH10:
   4860 	case WM_T_PCH:
   4861 	case WM_T_PCH2:
   4862 	case WM_T_PCH_LPT:
   4863 	case WM_T_PCH_SPT:
   4864 	case WM_T_PCH_CNP:
   4865 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4866 		if (wm_phy_resetisblocked(sc) == false) {
   4867 			/*
   4868 			 * Gate automatic PHY configuration by hardware on
   4869 			 * non-managed 82579
   4870 			 */
   4871 			if ((sc->sc_type == WM_T_PCH2)
   4872 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4873 				== 0))
   4874 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4875 
   4876 			reg |= CTRL_PHY_RESET;
   4877 			phy_reset = 1;
   4878 		} else
   4879 			printf("XXX reset is blocked!!!\n");
   4880 		sc->phy.acquire(sc);
   4881 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4882 		/* Don't insert a completion barrier when reset */
   4883 		delay(20*1000);
   4884 		mutex_exit(sc->sc_ich_phymtx);
   4885 		break;
   4886 	case WM_T_82580:
   4887 	case WM_T_I350:
   4888 	case WM_T_I354:
   4889 	case WM_T_I210:
   4890 	case WM_T_I211:
   4891 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4892 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4893 			CSR_WRITE_FLUSH(sc);
   4894 		delay(5000);
   4895 		break;
   4896 	case WM_T_82542_2_0:
   4897 	case WM_T_82542_2_1:
   4898 	case WM_T_82543:
   4899 	case WM_T_82540:
   4900 	case WM_T_82545:
   4901 	case WM_T_82546:
   4902 	case WM_T_82571:
   4903 	case WM_T_82572:
   4904 	case WM_T_82573:
   4905 	case WM_T_82574:
   4906 	case WM_T_82575:
   4907 	case WM_T_82576:
   4908 	case WM_T_82583:
   4909 	default:
   4910 		/* Everything else can safely use the documented method. */
   4911 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4912 		break;
   4913 	}
   4914 
   4915 	/* Must release the MDIO ownership after MAC reset */
   4916 	switch (sc->sc_type) {
   4917 	case WM_T_82573:
   4918 	case WM_T_82574:
   4919 	case WM_T_82583:
   4920 		if (error == 0)
   4921 			wm_put_hw_semaphore_82573(sc);
   4922 		break;
   4923 	default:
   4924 		break;
   4925 	}
   4926 
   4927 	/* Set Phy Config Counter to 50msec */
   4928 	if (sc->sc_type == WM_T_PCH2) {
   4929 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4930 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4931 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4932 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4933 	}
   4934 
   4935 	if (phy_reset != 0)
   4936 		wm_get_cfg_done(sc);
   4937 
   4938 	/* reload EEPROM */
   4939 	switch (sc->sc_type) {
   4940 	case WM_T_82542_2_0:
   4941 	case WM_T_82542_2_1:
   4942 	case WM_T_82543:
   4943 	case WM_T_82544:
   4944 		delay(10);
   4945 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4946 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4947 		CSR_WRITE_FLUSH(sc);
   4948 		delay(2000);
   4949 		break;
   4950 	case WM_T_82540:
   4951 	case WM_T_82545:
   4952 	case WM_T_82545_3:
   4953 	case WM_T_82546:
   4954 	case WM_T_82546_3:
   4955 		delay(5*1000);
   4956 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4957 		break;
   4958 	case WM_T_82541:
   4959 	case WM_T_82541_2:
   4960 	case WM_T_82547:
   4961 	case WM_T_82547_2:
   4962 		delay(20000);
   4963 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4964 		break;
   4965 	case WM_T_82571:
   4966 	case WM_T_82572:
   4967 	case WM_T_82573:
   4968 	case WM_T_82574:
   4969 	case WM_T_82583:
   4970 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4971 			delay(10);
   4972 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4973 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4974 			CSR_WRITE_FLUSH(sc);
   4975 		}
   4976 		/* check EECD_EE_AUTORD */
   4977 		wm_get_auto_rd_done(sc);
   4978 		/*
    4979 		 * PHY configuration from the NVM starts just after
    4980 		 * EECD_AUTO_RD is set.
   4981 		 */
   4982 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4983 		    || (sc->sc_type == WM_T_82583))
   4984 			delay(25*1000);
   4985 		break;
   4986 	case WM_T_82575:
   4987 	case WM_T_82576:
   4988 	case WM_T_82580:
   4989 	case WM_T_I350:
   4990 	case WM_T_I354:
   4991 	case WM_T_I210:
   4992 	case WM_T_I211:
   4993 	case WM_T_80003:
   4994 		/* check EECD_EE_AUTORD */
   4995 		wm_get_auto_rd_done(sc);
   4996 		break;
   4997 	case WM_T_ICH8:
   4998 	case WM_T_ICH9:
   4999 	case WM_T_ICH10:
   5000 	case WM_T_PCH:
   5001 	case WM_T_PCH2:
   5002 	case WM_T_PCH_LPT:
   5003 	case WM_T_PCH_SPT:
   5004 	case WM_T_PCH_CNP:
   5005 		break;
   5006 	default:
   5007 		panic("%s: unknown type\n", __func__);
   5008 	}
   5009 
   5010 	/* Check whether EEPROM is present or not */
   5011 	switch (sc->sc_type) {
   5012 	case WM_T_82575:
   5013 	case WM_T_82576:
   5014 	case WM_T_82580:
   5015 	case WM_T_I350:
   5016 	case WM_T_I354:
   5017 	case WM_T_ICH8:
   5018 	case WM_T_ICH9:
   5019 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5020 			/* Not found */
   5021 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5022 			if (sc->sc_type == WM_T_82575)
   5023 				wm_reset_init_script_82575(sc);
   5024 		}
   5025 		break;
   5026 	default:
   5027 		break;
   5028 	}
   5029 
   5030 	if (phy_reset != 0)
   5031 		wm_phy_post_reset(sc);
   5032 
   5033 	if ((sc->sc_type == WM_T_82580)
   5034 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5035 		/* clear global device reset status bit */
   5036 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5037 	}
   5038 
   5039 	/* Clear any pending interrupt events. */
   5040 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5041 	reg = CSR_READ(sc, WMREG_ICR);
   5042 	if (wm_is_using_msix(sc)) {
   5043 		if (sc->sc_type != WM_T_82574) {
   5044 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5045 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5046 		} else
   5047 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5048 	}
   5049 
   5050 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5051 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5052 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5053 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5054 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5055 		reg |= KABGTXD_BGSQLBIAS;
   5056 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5057 	}
   5058 
   5059 	/* reload sc_ctrl */
   5060 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5061 
   5062 	wm_set_eee(sc);
   5063 
   5064 	/*
   5065 	 * For PCH, this write will make sure that any noise will be detected
   5066 	 * as a CRC error and be dropped rather than show up as a bad packet
   5067 	 * to the DMA engine
   5068 	 */
   5069 	if (sc->sc_type == WM_T_PCH)
   5070 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5071 
   5072 	if (sc->sc_type >= WM_T_82544)
   5073 		CSR_WRITE(sc, WMREG_WUC, 0);
   5074 
   5075 	if (sc->sc_type < WM_T_82575)
   5076 		wm_disable_aspm(sc); /* Workaround for some chips */
   5077 
   5078 	wm_reset_mdicnfg_82580(sc);
   5079 
   5080 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5081 		wm_pll_workaround_i210(sc);
   5082 
   5083 	if (sc->sc_type == WM_T_80003) {
   5084 		/* default to TRUE to enable the MDIC W/A */
   5085 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5086 
   5087 		rv = wm_kmrn_readreg(sc,
   5088 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5089 		if (rv == 0) {
   5090 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5091 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5092 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5093 			else
   5094 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5095 		}
   5096 	}
   5097 }
   5098 
   5099 /*
   5100  * wm_add_rxbuf:
   5101  *
    5102  *	Add a receive buffer to the indicated descriptor.
   5103  */
   5104 static int
   5105 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5106 {
   5107 	struct wm_softc *sc = rxq->rxq_sc;
   5108 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5109 	struct mbuf *m;
   5110 	int error;
   5111 
   5112 	KASSERT(mutex_owned(rxq->rxq_lock));
   5113 
   5114 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5115 	if (m == NULL)
   5116 		return ENOBUFS;
   5117 
   5118 	MCLGET(m, M_DONTWAIT);
   5119 	if ((m->m_flags & M_EXT) == 0) {
   5120 		m_freem(m);
   5121 		return ENOBUFS;
   5122 	}
   5123 
   5124 	if (rxs->rxs_mbuf != NULL)
   5125 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5126 
   5127 	rxs->rxs_mbuf = m;
   5128 
   5129 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5130 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5131 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5132 	if (error) {
   5133 		/* XXX XXX XXX */
   5134 		aprint_error_dev(sc->sc_dev,
   5135 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5136 		panic("wm_add_rxbuf");
   5137 	}
   5138 
   5139 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5140 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5141 
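         	/*
         	 * On NEWQUEUE chips, hand the descriptor to the hardware only
         	 * once RCTL.EN has been set; see the note about 82575/82576
         	 * descriptor initialization in wm_init_locked().
         	 */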
   5142 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5143 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5144 			wm_init_rxdesc(rxq, idx);
   5145 	} else
   5146 		wm_init_rxdesc(rxq, idx);
   5147 
   5148 	return 0;
   5149 }
   5150 
   5151 /*
   5152  * wm_rxdrain:
   5153  *
   5154  *	Drain the receive queue.
   5155  */
   5156 static void
   5157 wm_rxdrain(struct wm_rxqueue *rxq)
   5158 {
   5159 	struct wm_softc *sc = rxq->rxq_sc;
   5160 	struct wm_rxsoft *rxs;
   5161 	int i;
   5162 
   5163 	KASSERT(mutex_owned(rxq->rxq_lock));
   5164 
   5165 	for (i = 0; i < WM_NRXDESC; i++) {
   5166 		rxs = &rxq->rxq_soft[i];
   5167 		if (rxs->rxs_mbuf != NULL) {
   5168 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5169 			m_freem(rxs->rxs_mbuf);
   5170 			rxs->rxs_mbuf = NULL;
   5171 		}
   5172 	}
   5173 }
   5174 
   5175 /*
   5176  * Setup registers for RSS.
   5177  *
    5178  * XXX VMDq is not yet supported.
   5179  */
   5180 static void
   5181 wm_init_rss(struct wm_softc *sc)
   5182 {
   5183 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5184 	int i;
   5185 
   5186 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5187 
   5188 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5189 		int qid, reta_ent;
   5190 
   5191 		qid  = i % sc->sc_nqueues;
   5192 		switch (sc->sc_type) {
   5193 		case WM_T_82574:
   5194 			reta_ent = __SHIFTIN(qid,
   5195 			    RETA_ENT_QINDEX_MASK_82574);
   5196 			break;
   5197 		case WM_T_82575:
   5198 			reta_ent = __SHIFTIN(qid,
   5199 			    RETA_ENT_QINDEX1_MASK_82575);
   5200 			break;
   5201 		default:
   5202 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5203 			break;
   5204 		}
   5205 
   5206 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5207 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5208 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5209 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5210 	}
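
         	/*
         	 * Example: with sc_nqueues == 4, RETA entry i above is
         	 * assigned to queue i % 4, spreading RSS hash values
         	 * round-robin across the four queues.
         	 */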
   5211 
   5212 	rss_getkey((uint8_t *)rss_key);
   5213 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5214 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5215 
   5216 	if (sc->sc_type == WM_T_82574)
   5217 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5218 	else
   5219 		mrqc = MRQC_ENABLE_RSS_MQ;
   5220 
   5221 	/*
    5222 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5223 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5224 	 */
   5225 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5226 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5227 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5228 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5229 
   5230 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5231 }
   5232 
   5233 /*
    5234  * Adjust the TX and RX queue numbers which the system actually uses.
    5235  *
    5236  * The numbers are affected by the following parameters:
    5237  *     - The number of hardware queues
   5238  *     - The number of MSI-X vectors (= "nvectors" argument)
   5239  *     - ncpu
   5240  */
   5241 static void
   5242 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5243 {
   5244 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5245 
   5246 	if (nvectors < 2) {
   5247 		sc->sc_nqueues = 1;
   5248 		return;
   5249 	}
   5250 
   5251 	switch (sc->sc_type) {
   5252 	case WM_T_82572:
   5253 		hw_ntxqueues = 2;
   5254 		hw_nrxqueues = 2;
   5255 		break;
   5256 	case WM_T_82574:
   5257 		hw_ntxqueues = 2;
   5258 		hw_nrxqueues = 2;
   5259 		break;
   5260 	case WM_T_82575:
   5261 		hw_ntxqueues = 4;
   5262 		hw_nrxqueues = 4;
   5263 		break;
   5264 	case WM_T_82576:
   5265 		hw_ntxqueues = 16;
   5266 		hw_nrxqueues = 16;
   5267 		break;
   5268 	case WM_T_82580:
   5269 	case WM_T_I350:
   5270 	case WM_T_I354:
   5271 		hw_ntxqueues = 8;
   5272 		hw_nrxqueues = 8;
   5273 		break;
   5274 	case WM_T_I210:
   5275 		hw_ntxqueues = 4;
   5276 		hw_nrxqueues = 4;
   5277 		break;
   5278 	case WM_T_I211:
   5279 		hw_ntxqueues = 2;
   5280 		hw_nrxqueues = 2;
   5281 		break;
   5282 		/*
    5283 		 * As the Ethernet controllers below do not support MSI-X,
    5284 		 * this driver does not use multiqueue on them:
   5285 		 *     - WM_T_80003
   5286 		 *     - WM_T_ICH8
   5287 		 *     - WM_T_ICH9
   5288 		 *     - WM_T_ICH10
   5289 		 *     - WM_T_PCH
   5290 		 *     - WM_T_PCH2
   5291 		 *     - WM_T_PCH_LPT
   5292 		 */
   5293 	default:
   5294 		hw_ntxqueues = 1;
   5295 		hw_nrxqueues = 1;
   5296 		break;
   5297 	}
   5298 
   5299 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5300 
   5301 	/*
    5302 	 * As more queues than MSI-X vectors cannot improve scaling, we
    5303 	 * limit the number of queues actually used.
   5304 	 */
   5305 	if (nvectors < hw_nqueues + 1)
   5306 		sc->sc_nqueues = nvectors - 1;
   5307 	else
   5308 		sc->sc_nqueues = hw_nqueues;
   5309 
   5310 	/*
    5311 	 * As more queues than CPUs cannot improve scaling, we limit
    5312 	 * the number of queues actually used.
   5313 	 */
   5314 	if (ncpu < sc->sc_nqueues)
   5315 		sc->sc_nqueues = ncpu;
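
         	/*
         	 * Worked example: an 82576 (16 hardware queue pairs) attached
         	 * with nvectors == 5 on an 8-CPU machine ends up with
         	 * sc_nqueues = min(16, 5 - 1, 8) = 4.
         	 */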
   5316 }
   5317 
   5318 static inline bool
   5319 wm_is_using_msix(struct wm_softc *sc)
   5320 {
   5321 
   5322 	return (sc->sc_nintrs > 1);
   5323 }
   5324 
   5325 static inline bool
   5326 wm_is_using_multiqueue(struct wm_softc *sc)
   5327 {
   5328 
   5329 	return (sc->sc_nqueues > 1);
   5330 }
   5331 
   5332 static int
   5333 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5334 {
   5335 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5336 	wmq->wmq_id = qidx;
   5337 	wmq->wmq_intr_idx = intr_idx;
   5338 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5339 #ifdef WM_MPSAFE
   5340 	    | SOFTINT_MPSAFE
   5341 #endif
   5342 	    , wm_handle_queue, wmq);
   5343 	if (wmq->wmq_si != NULL)
   5344 		return 0;
   5345 
   5346 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5347 	    wmq->wmq_id);
   5348 
   5349 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5350 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5351 	return ENOMEM;
   5352 }
   5353 
   5354 /*
   5355  * Both single interrupt MSI and INTx can use this function.
   5356  */
   5357 static int
   5358 wm_setup_legacy(struct wm_softc *sc)
   5359 {
   5360 	pci_chipset_tag_t pc = sc->sc_pc;
   5361 	const char *intrstr = NULL;
   5362 	char intrbuf[PCI_INTRSTR_LEN];
   5363 	int error;
   5364 
   5365 	error = wm_alloc_txrx_queues(sc);
   5366 	if (error) {
   5367 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5368 		    error);
   5369 		return ENOMEM;
   5370 	}
   5371 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5372 	    sizeof(intrbuf));
   5373 #ifdef WM_MPSAFE
   5374 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5375 #endif
   5376 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5377 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5378 	if (sc->sc_ihs[0] == NULL) {
   5379 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5380 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5381 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5382 		return ENOMEM;
   5383 	}
   5384 
   5385 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5386 	sc->sc_nintrs = 1;
   5387 
   5388 	return wm_softint_establish(sc, 0, 0);
   5389 }
   5390 
   5391 static int
   5392 wm_setup_msix(struct wm_softc *sc)
   5393 {
   5394 	void *vih;
   5395 	kcpuset_t *affinity;
   5396 	int qidx, error, intr_idx, txrx_established;
   5397 	pci_chipset_tag_t pc = sc->sc_pc;
   5398 	const char *intrstr = NULL;
   5399 	char intrbuf[PCI_INTRSTR_LEN];
   5400 	char intr_xname[INTRDEVNAMEBUF];
   5401 
   5402 	if (sc->sc_nqueues < ncpu) {
   5403 		/*
    5404 		 * To avoid other devices' interrupts, the affinity of the
    5405 		 * Tx/Rx interrupts starts from CPU#1.
   5406 		 */
   5407 		sc->sc_affinity_offset = 1;
   5408 	} else {
   5409 		/*
    5410 		 * In this case, this device uses all CPUs, so we match the
    5411 		 * affinitized cpu_index to the MSI-X vector for readability.
   5412 		 */
   5413 		sc->sc_affinity_offset = 0;
   5414 	}
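
         	/*
         	 * For example, with sc_affinity_offset == 1 and four queues
         	 * on an 8-CPU machine, the Tx/Rx vectors below bind to
         	 * CPU#1..CPU#4, while the link vector keeps its default
         	 * affinity.
         	 */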
   5415 
   5416 	error = wm_alloc_txrx_queues(sc);
   5417 	if (error) {
   5418 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5419 		    error);
   5420 		return ENOMEM;
   5421 	}
   5422 
   5423 	kcpuset_create(&affinity, false);
   5424 	intr_idx = 0;
   5425 
   5426 	/*
   5427 	 * TX and RX
   5428 	 */
   5429 	txrx_established = 0;
   5430 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5431 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5432 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5433 
   5434 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5435 		    sizeof(intrbuf));
   5436 #ifdef WM_MPSAFE
   5437 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5438 		    PCI_INTR_MPSAFE, true);
   5439 #endif
   5440 		memset(intr_xname, 0, sizeof(intr_xname));
   5441 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5442 		    device_xname(sc->sc_dev), qidx);
   5443 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5444 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5445 		if (vih == NULL) {
   5446 			aprint_error_dev(sc->sc_dev,
   5447 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5448 			    intrstr ? " at " : "",
   5449 			    intrstr ? intrstr : "");
   5450 
   5451 			goto fail;
   5452 		}
   5453 		kcpuset_zero(affinity);
   5454 		/* Round-robin affinity */
   5455 		kcpuset_set(affinity, affinity_to);
   5456 		error = interrupt_distribute(vih, affinity, NULL);
   5457 		if (error == 0) {
   5458 			aprint_normal_dev(sc->sc_dev,
   5459 			    "for TX and RX interrupting at %s affinity to %u\n",
   5460 			    intrstr, affinity_to);
   5461 		} else {
   5462 			aprint_normal_dev(sc->sc_dev,
   5463 			    "for TX and RX interrupting at %s\n", intrstr);
   5464 		}
   5465 		sc->sc_ihs[intr_idx] = vih;
   5466 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5467 			goto fail;
   5468 		txrx_established++;
   5469 		intr_idx++;
   5470 	}
   5471 
   5472 	/*
   5473 	 * LINK
   5474 	 */
   5475 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5476 	    sizeof(intrbuf));
   5477 #ifdef WM_MPSAFE
   5478 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5479 #endif
   5480 	memset(intr_xname, 0, sizeof(intr_xname));
   5481 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5482 	    device_xname(sc->sc_dev));
   5483 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5484 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5485 	if (vih == NULL) {
   5486 		aprint_error_dev(sc->sc_dev,
   5487 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5488 		    intrstr ? " at " : "",
   5489 		    intrstr ? intrstr : "");
   5490 
   5491 		goto fail;
   5492 	}
   5493 	/* keep default affinity to LINK interrupt */
   5494 	aprint_normal_dev(sc->sc_dev,
   5495 	    "for LINK interrupting at %s\n", intrstr);
   5496 	sc->sc_ihs[intr_idx] = vih;
   5497 	sc->sc_link_intr_idx = intr_idx;
   5498 
   5499 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5500 	kcpuset_destroy(affinity);
   5501 	return 0;
   5502 
   5503  fail:
   5504 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5505 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5506 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5507 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5508 	}
   5509 
   5510 	kcpuset_destroy(affinity);
   5511 	return ENOMEM;
   5512 }
   5513 
   5514 static void
   5515 wm_unset_stopping_flags(struct wm_softc *sc)
   5516 {
   5517 	int i;
   5518 
   5519 	KASSERT(WM_CORE_LOCKED(sc));
   5520 
   5521 	/*
   5522 	 * must unset stopping flags in ascending order.
   5523 	 */
   5524 	for (i = 0; i < sc->sc_nqueues; i++) {
   5525 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5526 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5527 
   5528 		mutex_enter(txq->txq_lock);
   5529 		txq->txq_stopping = false;
   5530 		mutex_exit(txq->txq_lock);
   5531 
   5532 		mutex_enter(rxq->rxq_lock);
   5533 		rxq->rxq_stopping = false;
   5534 		mutex_exit(rxq->rxq_lock);
   5535 	}
   5536 
   5537 	sc->sc_core_stopping = false;
   5538 }
   5539 
   5540 static void
   5541 wm_set_stopping_flags(struct wm_softc *sc)
   5542 {
   5543 	int i;
   5544 
   5545 	KASSERT(WM_CORE_LOCKED(sc));
   5546 
   5547 	sc->sc_core_stopping = true;
   5548 
   5549 	/*
   5550 	 * must set stopping flags in ascending order.
   5551 	 */
   5552 	for (i = 0; i < sc->sc_nqueues; i++) {
   5553 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5554 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5555 
   5556 		mutex_enter(rxq->rxq_lock);
   5557 		rxq->rxq_stopping = true;
   5558 		mutex_exit(rxq->rxq_lock);
   5559 
   5560 		mutex_enter(txq->txq_lock);
   5561 		txq->txq_stopping = true;
   5562 		mutex_exit(txq->txq_lock);
   5563 	}
   5564 }
   5565 
   5566 /*
    5567  * Write the interrupt interval value to the ITR or EITR register.
   5568  */
   5569 static void
   5570 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5571 {
   5572 
   5573 	if (!wmq->wmq_set_itr)
   5574 		return;
   5575 
   5576 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5577 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5578 
    5579 		/*
    5580 		 * The 82575 doesn't have the CNT_INGR field, so
    5581 		 * overwrite the counter field in software.
    5582 		 */
   5583 		if (sc->sc_type == WM_T_82575)
   5584 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5585 		else
   5586 			eitr |= EITR_CNT_INGR;
   5587 
   5588 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5589 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
    5590 		/*
    5591 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5592 		 * the multiqueue function with MSI-X.
    5593 		 */
   5594 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5595 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5596 	} else {
   5597 		KASSERT(wmq->wmq_id == 0);
   5598 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5599 	}
   5600 
   5601 	wmq->wmq_set_itr = false;
   5602 }
   5603 
    5604 /*
    5605  * TODO
    5606  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5607  * but it does not fit wm(4). So, AIM stays disabled until we find an
    5608  * appropriate way to calculate the ITR.
    5609  */
    5610 /*
    5611  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5612  * write. This function does not write the ITR/EITR register itself.
    5613  */
   5614 static void
   5615 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5616 {
   5617 #ifdef NOTYET
   5618 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5619 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5620 	uint32_t avg_size = 0;
   5621 	uint32_t new_itr;
   5622 
   5623 	if (rxq->rxq_packets)
   5624 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5625 	if (txq->txq_packets)
   5626 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5627 
   5628 	if (avg_size == 0) {
   5629 		new_itr = 450; /* restore default value */
   5630 		goto out;
   5631 	}
   5632 
   5633 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5634 	avg_size += 24;
   5635 
   5636 	/* Don't starve jumbo frames */
   5637 	avg_size = uimin(avg_size, 3000);
   5638 
   5639 	/* Give a little boost to mid-size frames */
   5640 	if ((avg_size > 300) && (avg_size < 1200))
   5641 		new_itr = avg_size / 3;
   5642 	else
   5643 		new_itr = avg_size / 2;
   5644 
   5645 out:
    5646 	/*
    5647 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
    5648 	 * controllers'. See the sc->sc_itr_init setting in wm_init_locked().
    5649 	 */
   5650 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5651 		new_itr *= 4;
   5652 
   5653 	if (new_itr != wmq->wmq_itr) {
   5654 		wmq->wmq_itr = new_itr;
   5655 		wmq->wmq_set_itr = true;
   5656 	} else
   5657 		wmq->wmq_set_itr = false;
   5658 
   5659 	rxq->rxq_packets = 0;
   5660 	rxq->rxq_bytes = 0;
   5661 	txq->txq_packets = 0;
   5662 	txq->txq_bytes = 0;
   5663 #endif
   5664 }
   5665 
   5666 /*
   5667  * wm_init:		[ifnet interface function]
   5668  *
   5669  *	Initialize the interface.
   5670  */
   5671 static int
   5672 wm_init(struct ifnet *ifp)
   5673 {
   5674 	struct wm_softc *sc = ifp->if_softc;
   5675 	int ret;
   5676 
   5677 	WM_CORE_LOCK(sc);
   5678 	ret = wm_init_locked(ifp);
   5679 	WM_CORE_UNLOCK(sc);
   5680 
   5681 	return ret;
   5682 }
   5683 
   5684 static int
   5685 wm_init_locked(struct ifnet *ifp)
   5686 {
   5687 	struct wm_softc *sc = ifp->if_softc;
   5688 	struct ethercom *ec = &sc->sc_ethercom;
   5689 	int i, j, trynum, error = 0;
   5690 	uint32_t reg;
   5691 
   5692 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5693 		device_xname(sc->sc_dev), __func__));
   5694 	KASSERT(WM_CORE_LOCKED(sc));
   5695 
   5696 	/*
    5697 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5698 	 * There is a small but measurable benefit to avoiding the adjustment
   5699 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5700 	 * on such platforms.  One possibility is that the DMA itself is
   5701 	 * slightly more efficient if the front of the entire packet (instead
   5702 	 * of the front of the headers) is aligned.
   5703 	 *
   5704 	 * Note we must always set align_tweak to 0 if we are using
   5705 	 * jumbo frames.
   5706 	 */
   5707 #ifdef __NO_STRICT_ALIGNMENT
   5708 	sc->sc_align_tweak = 0;
   5709 #else
   5710 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5711 		sc->sc_align_tweak = 0;
   5712 	else
   5713 		sc->sc_align_tweak = 2;
   5714 #endif /* __NO_STRICT_ALIGNMENT */
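
         	/*
         	 * (The 2-byte tweak offsets the 14-byte Ethernet header so
         	 * that the IP header which follows it lands on a 4-byte
         	 * boundary on strict-alignment platforms.)
         	 */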
   5715 
   5716 	/* Cancel any pending I/O. */
   5717 	wm_stop_locked(ifp, 0);
   5718 
   5719 	/* update statistics before reset */
   5720 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5721 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5722 
   5723 	/* PCH_SPT hardware workaround */
   5724 	if (sc->sc_type == WM_T_PCH_SPT)
   5725 		wm_flush_desc_rings(sc);
   5726 
   5727 	/* Reset the chip to a known state. */
   5728 	wm_reset(sc);
   5729 
   5730 	/*
   5731 	 * AMT based hardware can now take control from firmware
   5732 	 * Do this after reset.
   5733 	 */
   5734 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5735 		wm_get_hw_control(sc);
   5736 
   5737 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5738 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5739 		wm_legacy_irq_quirk_spt(sc);
   5740 
   5741 	/* Init hardware bits */
   5742 	wm_initialize_hardware_bits(sc);
   5743 
   5744 	/* Reset the PHY. */
   5745 	if (sc->sc_flags & WM_F_HAS_MII)
   5746 		wm_gmii_reset(sc);
   5747 
   5748 	if (sc->sc_type >= WM_T_ICH8) {
   5749 		reg = CSR_READ(sc, WMREG_GCR);
   5750 		/*
    5751 		 * The ICH8 no-snoop bits have the opposite polarity. Set to
    5752 		 * snoop by default after reset.
   5753 		 */
   5754 		if (sc->sc_type == WM_T_ICH8)
   5755 			reg |= GCR_NO_SNOOP_ALL;
   5756 		else
   5757 			reg &= ~GCR_NO_SNOOP_ALL;
   5758 		CSR_WRITE(sc, WMREG_GCR, reg);
   5759 	}
   5760 	if ((sc->sc_type >= WM_T_ICH8)
   5761 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5762 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5764 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5765 		reg |= CTRL_EXT_RO_DIS;
   5766 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5767 	}
   5768 
   5769 	/* Calculate (E)ITR value */
   5770 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
    5771 		/*
    5772 		 * For NEWQUEUE's EITR (except for the 82575).
    5773 		 * The 82575's EITR should be set to the same throttling value
    5774 		 * as the other old controllers' ITR because the interrupt/sec
    5775 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5776 		 *
    5777 		 * The 82574's EITR should be set to the same value as the ITR.
    5778 		 *
    5779 		 * For N interrupts/sec, set this value to
    5780 		 * 1,000,000 / N, in contrast to the ITR throttling value.
    5781 		 */
   5782 		sc->sc_itr_init = 450;
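         		/* I.e. roughly 1,000,000 / 450 ~= 2222 interrupts/sec. */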
   5783 	} else if (sc->sc_type >= WM_T_82543) {
   5784 		/*
   5785 		 * Set up the interrupt throttling register (units of 256ns)
   5786 		 * Note that a footnote in Intel's documentation says this
   5787 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5788 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5789 		 * that that is also true for the 1024ns units of the other
   5790 		 * interrupt-related timer registers -- so, really, we ought
   5791 		 * to divide this value by 4 when the link speed is low.
   5792 		 *
   5793 		 * XXX implement this division at link speed change!
   5794 		 */
   5795 
   5796 		/*
   5797 		 * For N interrupts/sec, set this value to:
   5798 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5799 		 * absolute and packet timer values to this value
   5800 		 * divided by 4 to get "simple timer" behavior.
   5801 		 */
   5802 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
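         		/*
         		 * Check: 1,000,000,000 / (1500 * 256) =
         		 * 1,000,000,000 / 384,000 ~= 2604 ints/sec, as noted.
         		 */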
   5803 	}
   5804 
   5805 	error = wm_init_txrx_queues(sc);
   5806 	if (error)
   5807 		goto out;
   5808 
   5809 	/*
   5810 	 * Clear out the VLAN table -- we don't use it (yet).
   5811 	 */
   5812 	CSR_WRITE(sc, WMREG_VET, 0);
   5813 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5814 		trynum = 10; /* Due to hw errata */
   5815 	else
   5816 		trynum = 1;
   5817 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5818 		for (j = 0; j < trynum; j++)
   5819 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5820 
   5821 	/*
   5822 	 * Set up flow-control parameters.
   5823 	 *
   5824 	 * XXX Values could probably stand some tuning.
   5825 	 */
   5826 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5827 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5828 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5829 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5830 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5831 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5832 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5833 	}
   5834 
   5835 	sc->sc_fcrtl = FCRTL_DFLT;
   5836 	if (sc->sc_type < WM_T_82543) {
   5837 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5838 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5839 	} else {
   5840 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5841 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5842 	}
   5843 
   5844 	if (sc->sc_type == WM_T_80003)
   5845 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5846 	else
   5847 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5848 
   5849 	/* Writes the control register. */
   5850 	wm_set_vlan(sc);
   5851 
   5852 	if (sc->sc_flags & WM_F_HAS_MII) {
   5853 		uint16_t kmreg;
   5854 
   5855 		switch (sc->sc_type) {
   5856 		case WM_T_80003:
   5857 		case WM_T_ICH8:
   5858 		case WM_T_ICH9:
   5859 		case WM_T_ICH10:
   5860 		case WM_T_PCH:
   5861 		case WM_T_PCH2:
   5862 		case WM_T_PCH_LPT:
   5863 		case WM_T_PCH_SPT:
   5864 		case WM_T_PCH_CNP:
   5865 			/*
   5866 			 * Set the mac to wait the maximum time between each
   5867 			 * iteration and increase the max iterations when
   5868 			 * polling the phy; this fixes erroneous timeouts at
   5869 			 * 10Mbps.
   5870 			 */
   5871 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5872 			    0xFFFF);
   5873 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5874 			    &kmreg);
   5875 			kmreg |= 0x3F;
   5876 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5877 			    kmreg);
   5878 			break;
   5879 		default:
   5880 			break;
   5881 		}
   5882 
   5883 		if (sc->sc_type == WM_T_80003) {
   5884 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5885 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5886 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5887 
   5888 			/* Bypass RX and TX FIFO's */
   5889 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5890 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5891 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5892 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5893 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5894 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5895 		}
   5896 	}
   5897 #if 0
   5898 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5899 #endif
   5900 
   5901 	/* Set up checksum offload parameters. */
   5902 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5903 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5904 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5905 		reg |= RXCSUM_IPOFL;
   5906 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5907 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5908 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5909 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5910 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5911 
   5912 	/* Set registers about MSI-X */
   5913 	if (wm_is_using_msix(sc)) {
   5914 		uint32_t ivar;
   5915 		struct wm_queue *wmq;
   5916 		int qid, qintr_idx;
   5917 
   5918 		if (sc->sc_type == WM_T_82575) {
   5919 			/* Interrupt control */
   5920 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5921 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5922 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5923 
   5924 			/* TX and RX */
   5925 			for (i = 0; i < sc->sc_nqueues; i++) {
   5926 				wmq = &sc->sc_queue[i];
   5927 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5928 				    EITR_TX_QUEUE(wmq->wmq_id)
   5929 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5930 			}
   5931 			/* Link status */
   5932 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5933 			    EITR_OTHER);
   5934 		} else if (sc->sc_type == WM_T_82574) {
   5935 			/* Interrupt control */
   5936 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5937 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5938 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5939 
    5940 			/*
    5941 			 * Work around an issue with spurious interrupts
    5942 			 * in MSI-X mode.
    5943 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5944 			 * been initialized yet; reinitialize WMREG_RFCTL here.
    5945 			 */
   5946 			reg = CSR_READ(sc, WMREG_RFCTL);
   5947 			reg |= WMREG_RFCTL_ACKDIS;
   5948 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5949 
   5950 			ivar = 0;
   5951 			/* TX and RX */
   5952 			for (i = 0; i < sc->sc_nqueues; i++) {
   5953 				wmq = &sc->sc_queue[i];
   5954 				qid = wmq->wmq_id;
   5955 				qintr_idx = wmq->wmq_intr_idx;
   5956 
   5957 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5958 				    IVAR_TX_MASK_Q_82574(qid));
   5959 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5960 				    IVAR_RX_MASK_Q_82574(qid));
   5961 			}
   5962 			/* Link status */
   5963 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5964 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5965 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5966 		} else {
   5967 			/* Interrupt control */
   5968 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5969 			    | GPIE_EIAME | GPIE_PBA);
   5970 
   5971 			switch (sc->sc_type) {
   5972 			case WM_T_82580:
   5973 			case WM_T_I350:
   5974 			case WM_T_I354:
   5975 			case WM_T_I210:
   5976 			case WM_T_I211:
   5977 				/* TX and RX */
   5978 				for (i = 0; i < sc->sc_nqueues; i++) {
   5979 					wmq = &sc->sc_queue[i];
   5980 					qid = wmq->wmq_id;
   5981 					qintr_idx = wmq->wmq_intr_idx;
   5982 
   5983 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5984 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5985 					ivar |= __SHIFTIN((qintr_idx
   5986 						| IVAR_VALID),
   5987 					    IVAR_TX_MASK_Q(qid));
   5988 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5989 					ivar |= __SHIFTIN((qintr_idx
   5990 						| IVAR_VALID),
   5991 					    IVAR_RX_MASK_Q(qid));
   5992 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5993 				}
   5994 				break;
   5995 			case WM_T_82576:
   5996 				/* TX and RX */
   5997 				for (i = 0; i < sc->sc_nqueues; i++) {
   5998 					wmq = &sc->sc_queue[i];
   5999 					qid = wmq->wmq_id;
   6000 					qintr_idx = wmq->wmq_intr_idx;
   6001 
   6002 					ivar = CSR_READ(sc,
   6003 					    WMREG_IVAR_Q_82576(qid));
   6004 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6005 					ivar |= __SHIFTIN((qintr_idx
   6006 						| IVAR_VALID),
   6007 					    IVAR_TX_MASK_Q_82576(qid));
   6008 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6009 					ivar |= __SHIFTIN((qintr_idx
   6010 						| IVAR_VALID),
   6011 					    IVAR_RX_MASK_Q_82576(qid));
   6012 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6013 					    ivar);
   6014 				}
   6015 				break;
   6016 			default:
   6017 				break;
   6018 			}
   6019 
   6020 			/* Link status */
   6021 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6022 			    IVAR_MISC_OTHER);
   6023 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6024 		}
   6025 
   6026 		if (wm_is_using_multiqueue(sc)) {
   6027 			wm_init_rss(sc);
   6028 
    6029 			/*
    6030 			 * NOTE: Receive Full-Packet Checksum Offload
    6031 			 * is mutually exclusive with Multiqueue. However,
    6032 			 * this is not the same as the TCP/IP checksums,
    6033 			 * which still work.
    6034 			 */
   6035 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6036 			reg |= RXCSUM_PCSD;
   6037 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6038 		}
   6039 	}
   6040 
   6041 	/* Set up the interrupt registers. */
   6042 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6043 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6044 	    ICR_RXO | ICR_RXT0;
   6045 	if (wm_is_using_msix(sc)) {
   6046 		uint32_t mask;
   6047 		struct wm_queue *wmq;
   6048 
   6049 		switch (sc->sc_type) {
   6050 		case WM_T_82574:
   6051 			mask = 0;
   6052 			for (i = 0; i < sc->sc_nqueues; i++) {
   6053 				wmq = &sc->sc_queue[i];
   6054 				mask |= ICR_TXQ(wmq->wmq_id);
   6055 				mask |= ICR_RXQ(wmq->wmq_id);
   6056 			}
   6057 			mask |= ICR_OTHER;
   6058 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6059 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6060 			break;
   6061 		default:
   6062 			if (sc->sc_type == WM_T_82575) {
   6063 				mask = 0;
   6064 				for (i = 0; i < sc->sc_nqueues; i++) {
   6065 					wmq = &sc->sc_queue[i];
   6066 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6067 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6068 				}
   6069 				mask |= EITR_OTHER;
   6070 			} else {
   6071 				mask = 0;
   6072 				for (i = 0; i < sc->sc_nqueues; i++) {
   6073 					wmq = &sc->sc_queue[i];
   6074 					mask |= 1 << wmq->wmq_intr_idx;
   6075 				}
   6076 				mask |= 1 << sc->sc_link_intr_idx;
   6077 			}
   6078 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6079 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6080 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6081 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6082 			break;
   6083 		}
   6084 	} else
   6085 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6086 
   6087 	/* Set up the inter-packet gap. */
   6088 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6089 
   6090 	if (sc->sc_type >= WM_T_82543) {
   6091 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6092 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6093 			wm_itrs_writereg(sc, wmq);
   6094 		}
    6095 		/*
    6096 		 * Link interrupts occur much less frequently than
    6097 		 * TX and RX interrupts, so we don't tune the
    6098 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    6099 		 * FreeBSD's if_igb does.
    6100 		 */
   6101 	}
   6102 
   6103 	/* Set the VLAN ethernetype. */
   6104 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6105 
   6106 	/*
   6107 	 * Set up the transmit control register; we start out with
    6108 	 * a collision distance suitable for FDX, but update it when
   6109 	 * we resolve the media type.
   6110 	 */
   6111 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6112 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6113 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6114 	if (sc->sc_type >= WM_T_82571)
   6115 		sc->sc_tctl |= TCTL_MULR;
   6116 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6117 
   6118 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6119 		/* Write TDT after TCTL.EN is set. See the document. */
   6120 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6121 	}
   6122 
   6123 	if (sc->sc_type == WM_T_80003) {
   6124 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6125 		reg &= ~TCTL_EXT_GCEX_MASK;
   6126 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6127 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6128 	}
   6129 
   6130 	/* Set the media. */
   6131 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6132 		goto out;
   6133 
   6134 	/* Configure for OS presence */
   6135 	wm_init_manageability(sc);
   6136 
   6137 	/*
   6138 	 * Set up the receive control register; we actually program the
   6139 	 * register when we set the receive filter. Use multicast address
   6140 	 * offset type 0.
   6141 	 *
   6142 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6143 	 * don't enable that feature.
   6144 	 */
   6145 	sc->sc_mchash_type = 0;
   6146 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6147 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6148 
   6149 	/*
    6150 	 * The 82574 uses the one-buffer extended Rx descriptor.
   6151 	 */
   6152 	if (sc->sc_type == WM_T_82574)
   6153 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6154 
   6155 	/*
   6156 	 * The I350 has a bug where it always strips the CRC whether
    6157 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   6158 	 */
   6159 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6160 	    || (sc->sc_type == WM_T_I210))
   6161 		sc->sc_rctl |= RCTL_SECRC;
   6162 
   6163 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6164 	    && (ifp->if_mtu > ETHERMTU)) {
   6165 		sc->sc_rctl |= RCTL_LPE;
   6166 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6167 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6168 	}
   6169 
   6170 	if (MCLBYTES == 2048)
   6171 		sc->sc_rctl |= RCTL_2k;
   6172 	else {
   6173 		if (sc->sc_type >= WM_T_82543) {
   6174 			switch (MCLBYTES) {
   6175 			case 4096:
   6176 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6177 				break;
   6178 			case 8192:
   6179 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6180 				break;
   6181 			case 16384:
   6182 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6183 				break;
   6184 			default:
   6185 				panic("wm_init: MCLBYTES %d unsupported",
   6186 				    MCLBYTES);
   6187 				break;
   6188 			}
   6189 		} else
   6190 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6191 	}
   6192 
   6193 	/* Enable ECC */
   6194 	switch (sc->sc_type) {
   6195 	case WM_T_82571:
   6196 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6197 		reg |= PBA_ECC_CORR_EN;
   6198 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6199 		break;
   6200 	case WM_T_PCH_LPT:
   6201 	case WM_T_PCH_SPT:
   6202 	case WM_T_PCH_CNP:
   6203 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6204 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6205 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6206 
   6207 		sc->sc_ctrl |= CTRL_MEHE;
   6208 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6209 		break;
   6210 	default:
   6211 		break;
   6212 	}
   6213 
   6214 	/*
   6215 	 * Set the receive filter.
   6216 	 *
   6217 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6218 	 * the setting of RCTL.EN in wm_set_filter()
   6219 	 */
   6220 	wm_set_filter(sc);
   6221 
   6222 	/* On 575 and later set RDT only if RX enabled */
   6223 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6224 		int qidx;
   6225 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6226 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6227 			for (i = 0; i < WM_NRXDESC; i++) {
   6228 				mutex_enter(rxq->rxq_lock);
   6229 				wm_init_rxdesc(rxq, i);
    6230 				mutex_exit(rxq->rxq_lock);
    6232 			}
   6233 		}
   6234 	}
   6235 
   6236 	wm_unset_stopping_flags(sc);
   6237 
   6238 	/* Start the one second link check clock. */
   6239 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6240 
   6241 	/* ...all done! */
   6242 	ifp->if_flags |= IFF_RUNNING;
   6243 	ifp->if_flags &= ~IFF_OACTIVE;
   6244 
   6245  out:
   6246 	/* Save last flags for the callback */
   6247 	sc->sc_if_flags = ifp->if_flags;
   6248 	sc->sc_ec_capenable = ec->ec_capenable;
   6249 	if (error)
   6250 		log(LOG_ERR, "%s: interface not running\n",
   6251 		    device_xname(sc->sc_dev));
   6252 	return error;
   6253 }
   6254 
   6255 /*
   6256  * wm_stop:		[ifnet interface function]
   6257  *
   6258  *	Stop transmission on the interface.
   6259  */
   6260 static void
   6261 wm_stop(struct ifnet *ifp, int disable)
   6262 {
   6263 	struct wm_softc *sc = ifp->if_softc;
   6264 
   6265 	WM_CORE_LOCK(sc);
   6266 	wm_stop_locked(ifp, disable);
   6267 	WM_CORE_UNLOCK(sc);
   6268 }
   6269 
   6270 static void
   6271 wm_stop_locked(struct ifnet *ifp, int disable)
   6272 {
   6273 	struct wm_softc *sc = ifp->if_softc;
   6274 	struct wm_txsoft *txs;
   6275 	int i, qidx;
   6276 
   6277 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6278 		device_xname(sc->sc_dev), __func__));
   6279 	KASSERT(WM_CORE_LOCKED(sc));
   6280 
   6281 	wm_set_stopping_flags(sc);
   6282 
   6283 	/* Stop the one second clock. */
   6284 	callout_stop(&sc->sc_tick_ch);
   6285 
   6286 	/* Stop the 82547 Tx FIFO stall check timer. */
   6287 	if (sc->sc_type == WM_T_82547)
   6288 		callout_stop(&sc->sc_txfifo_ch);
   6289 
   6290 	if (sc->sc_flags & WM_F_HAS_MII) {
   6291 		/* Down the MII. */
   6292 		mii_down(&sc->sc_mii);
   6293 	} else {
   6294 #if 0
   6295 		/* Should we clear PHY's status properly? */
   6296 		wm_reset(sc);
   6297 #endif
   6298 	}
   6299 
   6300 	/* Stop the transmit and receive processes. */
   6301 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6302 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6303 	sc->sc_rctl &= ~RCTL_EN;
   6304 
   6305 	/*
   6306 	 * Clear the interrupt mask to ensure the device cannot assert its
   6307 	 * interrupt line.
   6308 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6309 	 * service any currently pending or shared interrupt.
   6310 	 */
   6311 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6312 	sc->sc_icr = 0;
   6313 	if (wm_is_using_msix(sc)) {
   6314 		if (sc->sc_type != WM_T_82574) {
   6315 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6316 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6317 		} else
   6318 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6319 	}
   6320 
   6321 	/* Release any queued transmit buffers. */
   6322 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6323 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6324 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6325 		mutex_enter(txq->txq_lock);
   6326 		txq->txq_sending = false; /* ensure watchdog disabled */
   6327 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6328 			txs = &txq->txq_soft[i];
   6329 			if (txs->txs_mbuf != NULL) {
   6330 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6331 				m_freem(txs->txs_mbuf);
   6332 				txs->txs_mbuf = NULL;
   6333 			}
   6334 		}
   6335 		mutex_exit(txq->txq_lock);
   6336 	}
   6337 
   6338 	/* Mark the interface as down and cancel the watchdog timer. */
   6339 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6340 
   6341 	if (disable) {
   6342 		for (i = 0; i < sc->sc_nqueues; i++) {
   6343 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6344 			mutex_enter(rxq->rxq_lock);
   6345 			wm_rxdrain(rxq);
   6346 			mutex_exit(rxq->rxq_lock);
   6347 		}
   6348 	}
   6349 
   6350 #if 0 /* notyet */
   6351 	if (sc->sc_type >= WM_T_82544)
   6352 		CSR_WRITE(sc, WMREG_WUC, 0);
   6353 #endif
   6354 }
   6355 
   6356 static void
   6357 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6358 {
   6359 	struct mbuf *m;
   6360 	int i;
   6361 
   6362 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6363 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6364 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6365 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6366 		    m->m_data, m->m_len, m->m_flags);
   6367 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6368 	    i, i == 1 ? "" : "s");
   6369 }
   6370 
   6371 /*
   6372  * wm_82547_txfifo_stall:
   6373  *
   6374  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6375  *	reset the FIFO pointers, and restart packet transmission.
   6376  */
   6377 static void
   6378 wm_82547_txfifo_stall(void *arg)
   6379 {
   6380 	struct wm_softc *sc = arg;
   6381 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6382 
   6383 	mutex_enter(txq->txq_lock);
   6384 
   6385 	if (txq->txq_stopping)
   6386 		goto out;
   6387 
   6388 	if (txq->txq_fifo_stall) {
   6389 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6390 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6391 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6392 			/*
   6393 			 * Packets have drained.  Stop transmitter, reset
   6394 			 * FIFO pointers, restart transmitter, and kick
   6395 			 * the packet queue.
   6396 			 */
   6397 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6398 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6399 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6400 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6401 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6402 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6403 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6404 			CSR_WRITE_FLUSH(sc);
   6405 
   6406 			txq->txq_fifo_head = 0;
   6407 			txq->txq_fifo_stall = 0;
   6408 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6409 		} else {
   6410 			/*
   6411 			 * Still waiting for packets to drain; try again in
   6412 			 * another tick.
   6413 			 */
   6414 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6415 		}
   6416 	}
   6417 
   6418 out:
   6419 	mutex_exit(txq->txq_lock);
   6420 }
   6421 
   6422 /*
   6423  * wm_82547_txfifo_bugchk:
   6424  *
   6425  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6426  *	prevent enqueueing a packet that would wrap around the end
    6427  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6428  *
   6429  *	We do this by checking the amount of space before the end
   6430  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6431  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6432  *	the internal FIFO pointers to the beginning, and restart
   6433  *	transmission on the interface.
   6434  */
   6435 #define	WM_FIFO_HDR		0x10
   6436 #define	WM_82547_PAD_LEN	0x3e0
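/*
 * Illustrative example (numbers assumed, not from any datasheet): with
 * an 8KB Tx FIFO and txq_fifo_head at 7.5KB, space is 512 bytes. A
 * 1514-byte frame plus the 16-byte FIFO header rounds up to 1536, and
 * since 1536 >= WM_82547_PAD_LEN (992) + 512, the packet is held and
 * the queue stalled until the FIFO drains.
 */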
   6437 static int
   6438 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6439 {
   6440 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6441 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6442 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6443 
   6444 	/* Just return if already stalled. */
   6445 	if (txq->txq_fifo_stall)
   6446 		return 1;
   6447 
   6448 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6449 		/* Stall only occurs in half-duplex mode. */
   6450 		goto send_packet;
   6451 	}
   6452 
   6453 	if (len >= WM_82547_PAD_LEN + space) {
   6454 		txq->txq_fifo_stall = 1;
   6455 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6456 		return 1;
   6457 	}
   6458 
   6459  send_packet:
   6460 	txq->txq_fifo_head += len;
   6461 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6462 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6463 
   6464 	return 0;
   6465 }
   6466 
   6467 static int
   6468 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6469 {
   6470 	int error;
   6471 
   6472 	/*
   6473 	 * Allocate the control data structures, and create and load the
   6474 	 * DMA map for it.
   6475 	 *
   6476 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6477 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6478 	 * both sets within the same 4G segment.
   6479 	 */
   6480 	if (sc->sc_type < WM_T_82544)
   6481 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6482 	else
   6483 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6484 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6485 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6486 	else
   6487 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6488 
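	/*
	 * The 0x100000000ULL boundary argument below is what enforces
	 * the same-4G-segment requirement noted above.
	 */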
   6489 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6490 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6491 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6492 		aprint_error_dev(sc->sc_dev,
   6493 		    "unable to allocate TX control data, error = %d\n",
   6494 		    error);
   6495 		goto fail_0;
   6496 	}
   6497 
   6498 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6499 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6500 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6501 		aprint_error_dev(sc->sc_dev,
   6502 		    "unable to map TX control data, error = %d\n", error);
   6503 		goto fail_1;
   6504 	}
   6505 
   6506 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6507 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6508 		aprint_error_dev(sc->sc_dev,
   6509 		    "unable to create TX control data DMA map, error = %d\n",
   6510 		    error);
   6511 		goto fail_2;
   6512 	}
   6513 
   6514 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6515 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6516 		aprint_error_dev(sc->sc_dev,
   6517 		    "unable to load TX control data DMA map, error = %d\n",
   6518 		    error);
   6519 		goto fail_3;
   6520 	}
   6521 
   6522 	return 0;
   6523 
   6524  fail_3:
   6525 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6526  fail_2:
   6527 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6528 	    WM_TXDESCS_SIZE(txq));
   6529  fail_1:
   6530 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6531  fail_0:
   6532 	return error;
   6533 }
   6534 
   6535 static void
   6536 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6537 {
   6538 
   6539 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6540 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6541 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6542 	    WM_TXDESCS_SIZE(txq));
   6543 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6544 }
   6545 
   6546 static int
   6547 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6548 {
   6549 	int error;
   6550 	size_t rxq_descs_size;
   6551 
   6552 	/*
   6553 	 * Allocate the control data structures, and create and load the
   6554 	 * DMA map for it.
   6555 	 *
   6556 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6557 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6558 	 * both sets within the same 4G segment.
   6559 	 */
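	/*
	 * The Rx descriptor format varies: extended on the 82574,
	 * advanced on the "new queue" (82575 and later) chips, and
	 * legacy otherwise; hence the per-chip descriptor size below.
	 */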
   6560 	rxq->rxq_ndesc = WM_NRXDESC;
   6561 	if (sc->sc_type == WM_T_82574)
   6562 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6563 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6564 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6565 	else
   6566 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6567 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6568 
   6569 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6570 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6571 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6572 		aprint_error_dev(sc->sc_dev,
   6573 		    "unable to allocate RX control data, error = %d\n",
   6574 		    error);
   6575 		goto fail_0;
   6576 	}
   6577 
   6578 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6579 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6580 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6581 		aprint_error_dev(sc->sc_dev,
   6582 		    "unable to map RX control data, error = %d\n", error);
   6583 		goto fail_1;
   6584 	}
   6585 
   6586 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6587 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6588 		aprint_error_dev(sc->sc_dev,
   6589 		    "unable to create RX control data DMA map, error = %d\n",
   6590 		    error);
   6591 		goto fail_2;
   6592 	}
   6593 
   6594 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6595 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6596 		aprint_error_dev(sc->sc_dev,
   6597 		    "unable to load RX control data DMA map, error = %d\n",
   6598 		    error);
   6599 		goto fail_3;
   6600 	}
   6601 
   6602 	return 0;
   6603 
   6604  fail_3:
   6605 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6606  fail_2:
   6607 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6608 	    rxq_descs_size);
   6609  fail_1:
   6610 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6611  fail_0:
   6612 	return error;
   6613 }
   6614 
   6615 static void
   6616 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6617 {
   6618 
   6619 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6620 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6621 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6622 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6623 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6624 }
   6625 
   6626 
   6627 static int
   6628 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6629 {
   6630 	int i, error;
   6631 
   6632 	/* Create the transmit buffer DMA maps. */
   6633 	WM_TXQUEUELEN(txq) =
   6634 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6635 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
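	/*
	 * The 82547 variants get a shorter job queue, presumably to
	 * keep the Tx FIFO bug workaround (wm_82547_txfifo_bugchk())
	 * tractable.
	 */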
   6636 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6637 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6638 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6639 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6640 			aprint_error_dev(sc->sc_dev,
   6641 			    "unable to create Tx DMA map %d, error = %d\n",
   6642 			    i, error);
   6643 			goto fail;
   6644 		}
   6645 	}
   6646 
   6647 	return 0;
   6648 
   6649  fail:
   6650 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6651 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6652 			bus_dmamap_destroy(sc->sc_dmat,
   6653 			    txq->txq_soft[i].txs_dmamap);
   6654 	}
   6655 	return error;
   6656 }
   6657 
   6658 static void
   6659 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6660 {
   6661 	int i;
   6662 
   6663 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6664 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6665 			bus_dmamap_destroy(sc->sc_dmat,
   6666 			    txq->txq_soft[i].txs_dmamap);
   6667 	}
   6668 }
   6669 
   6670 static int
   6671 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6672 {
   6673 	int i, error;
   6674 
   6675 	/* Create the receive buffer DMA maps. */
   6676 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6677 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6678 			    MCLBYTES, 0, 0,
   6679 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6680 			aprint_error_dev(sc->sc_dev,
   6681 			    "unable to create Rx DMA map %d error = %d\n",
   6682 			    i, error);
   6683 			goto fail;
   6684 		}
   6685 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6686 	}
   6687 
   6688 	return 0;
   6689 
   6690  fail:
   6691 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6692 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6693 			bus_dmamap_destroy(sc->sc_dmat,
   6694 			    rxq->rxq_soft[i].rxs_dmamap);
   6695 	}
   6696 	return error;
   6697 }
   6698 
   6699 static void
   6700 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6701 {
   6702 	int i;
   6703 
   6704 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6705 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6706 			bus_dmamap_destroy(sc->sc_dmat,
   6707 			    rxq->rxq_soft[i].rxs_dmamap);
   6708 	}
   6709 }
   6710 
   6711 /*
    6712  * wm_alloc_txrx_queues:
   6713  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6714  */
   6715 static int
   6716 wm_alloc_txrx_queues(struct wm_softc *sc)
   6717 {
   6718 	int i, error, tx_done, rx_done;
   6719 
   6720 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6721 	    KM_SLEEP);
   6722 	if (sc->sc_queue == NULL) {
    6723 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6724 		error = ENOMEM;
   6725 		goto fail_0;
   6726 	}
   6727 
   6728 	/*
   6729 	 * For transmission
   6730 	 */
   6731 	error = 0;
   6732 	tx_done = 0;
   6733 	for (i = 0; i < sc->sc_nqueues; i++) {
   6734 #ifdef WM_EVENT_COUNTERS
   6735 		int j;
   6736 		const char *xname;
   6737 #endif
   6738 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6739 		txq->txq_sc = sc;
   6740 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6741 
   6742 		error = wm_alloc_tx_descs(sc, txq);
   6743 		if (error)
   6744 			break;
   6745 		error = wm_alloc_tx_buffer(sc, txq);
   6746 		if (error) {
   6747 			wm_free_tx_descs(sc, txq);
   6748 			break;
   6749 		}
   6750 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6751 		if (txq->txq_interq == NULL) {
   6752 			wm_free_tx_descs(sc, txq);
   6753 			wm_free_tx_buffer(sc, txq);
   6754 			error = ENOMEM;
   6755 			break;
   6756 		}
   6757 
   6758 #ifdef WM_EVENT_COUNTERS
   6759 		xname = device_xname(sc->sc_dev);
   6760 
   6761 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6762 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6763 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6764 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6765 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6766 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6767 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6768 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6769 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6770 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6771 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6772 
   6773 		for (j = 0; j < WM_NTXSEGS; j++) {
   6774 			snprintf(txq->txq_txseg_evcnt_names[j],
   6775 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6776 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6777 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6778 		}
   6779 
   6780 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6781 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6782 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6783 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6784 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6785 #endif /* WM_EVENT_COUNTERS */
   6786 
   6787 		tx_done++;
   6788 	}
   6789 	if (error)
   6790 		goto fail_1;
   6791 
   6792 	/*
    6793 	 * For receive
   6794 	 */
   6795 	error = 0;
   6796 	rx_done = 0;
   6797 	for (i = 0; i < sc->sc_nqueues; i++) {
   6798 #ifdef WM_EVENT_COUNTERS
   6799 		const char *xname;
   6800 #endif
   6801 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6802 		rxq->rxq_sc = sc;
   6803 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6804 
   6805 		error = wm_alloc_rx_descs(sc, rxq);
   6806 		if (error)
   6807 			break;
   6808 
   6809 		error = wm_alloc_rx_buffer(sc, rxq);
   6810 		if (error) {
   6811 			wm_free_rx_descs(sc, rxq);
   6812 			break;
   6813 		}
   6814 
   6815 #ifdef WM_EVENT_COUNTERS
   6816 		xname = device_xname(sc->sc_dev);
   6817 
   6818 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6819 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6820 
   6821 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6822 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6823 #endif /* WM_EVENT_COUNTERS */
   6824 
   6825 		rx_done++;
   6826 	}
   6827 	if (error)
   6828 		goto fail_2;
   6829 
   6830 	return 0;
   6831 
   6832  fail_2:
   6833 	for (i = 0; i < rx_done; i++) {
   6834 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6835 		wm_free_rx_buffer(sc, rxq);
   6836 		wm_free_rx_descs(sc, rxq);
   6837 		if (rxq->rxq_lock)
   6838 			mutex_obj_free(rxq->rxq_lock);
   6839 	}
   6840  fail_1:
   6841 	for (i = 0; i < tx_done; i++) {
   6842 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6843 		pcq_destroy(txq->txq_interq);
   6844 		wm_free_tx_buffer(sc, txq);
   6845 		wm_free_tx_descs(sc, txq);
   6846 		if (txq->txq_lock)
   6847 			mutex_obj_free(txq->txq_lock);
   6848 	}
   6849 
   6850 	kmem_free(sc->sc_queue,
   6851 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6852  fail_0:
   6853 	return error;
   6854 }
   6855 
   6856 /*
    6857  * wm_free_txrx_queues:
   6858  *	Free {tx,rx}descs and {tx,rx} buffers
   6859  */
   6860 static void
   6861 wm_free_txrx_queues(struct wm_softc *sc)
   6862 {
   6863 	int i;
   6864 
   6865 	for (i = 0; i < sc->sc_nqueues; i++) {
   6866 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6867 
   6868 #ifdef WM_EVENT_COUNTERS
   6869 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6870 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6871 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6872 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6873 #endif /* WM_EVENT_COUNTERS */
   6874 
   6875 		wm_free_rx_buffer(sc, rxq);
   6876 		wm_free_rx_descs(sc, rxq);
   6877 		if (rxq->rxq_lock)
   6878 			mutex_obj_free(rxq->rxq_lock);
   6879 	}
   6880 
   6881 	for (i = 0; i < sc->sc_nqueues; i++) {
   6882 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6883 		struct mbuf *m;
   6884 #ifdef WM_EVENT_COUNTERS
   6885 		int j;
   6886 
   6887 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6888 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6889 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6890 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6891 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6892 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6893 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6894 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6895 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6896 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6897 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6898 
   6899 		for (j = 0; j < WM_NTXSEGS; j++)
   6900 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6901 
   6902 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6903 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6904 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6905 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6906 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6907 #endif /* WM_EVENT_COUNTERS */
   6908 
   6909 		/* drain txq_interq */
   6910 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6911 			m_freem(m);
   6912 		pcq_destroy(txq->txq_interq);
   6913 
   6914 		wm_free_tx_buffer(sc, txq);
   6915 		wm_free_tx_descs(sc, txq);
   6916 		if (txq->txq_lock)
   6917 			mutex_obj_free(txq->txq_lock);
   6918 	}
   6919 
   6920 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6921 }
   6922 
   6923 static void
   6924 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6925 {
   6926 
   6927 	KASSERT(mutex_owned(txq->txq_lock));
   6928 
   6929 	/* Initialize the transmit descriptor ring. */
   6930 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6931 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6932 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6933 	txq->txq_free = WM_NTXDESC(txq);
   6934 	txq->txq_next = 0;
   6935 }
   6936 
   6937 static void
   6938 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6939     struct wm_txqueue *txq)
   6940 {
   6941 
   6942 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6943 		device_xname(sc->sc_dev), __func__));
   6944 	KASSERT(mutex_owned(txq->txq_lock));
   6945 
   6946 	if (sc->sc_type < WM_T_82543) {
   6947 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6948 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6949 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6950 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6951 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6952 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6953 	} else {
   6954 		int qid = wmq->wmq_id;
   6955 
   6956 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6957 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6958 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6959 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6960 
   6961 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6962 			/*
   6963 			 * Don't write TDT before TCTL.EN is set.
    6964 			 * See the documentation.
   6965 			 */
   6966 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6967 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6968 			    | TXDCTL_WTHRESH(0));
   6969 		else {
   6970 			/* XXX should update with AIM? */
   6971 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6972 			if (sc->sc_type >= WM_T_82540) {
   6973 				/* should be same */
   6974 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6975 			}
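			/*
			 * The division by 4 above converts wmq_itr,
			 * apparently kept in 256ns ITR units, to the
			 * 1.024us units of TIDV and TADV.
			 */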
   6976 
   6977 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6978 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6979 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6980 		}
   6981 	}
   6982 }
   6983 
   6984 static void
   6985 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6986 {
   6987 	int i;
   6988 
   6989 	KASSERT(mutex_owned(txq->txq_lock));
   6990 
   6991 	/* Initialize the transmit job descriptors. */
   6992 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6993 		txq->txq_soft[i].txs_mbuf = NULL;
   6994 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6995 	txq->txq_snext = 0;
   6996 	txq->txq_sdirty = 0;
   6997 }
   6998 
   6999 static void
   7000 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7001     struct wm_txqueue *txq)
   7002 {
   7003 
   7004 	KASSERT(mutex_owned(txq->txq_lock));
   7005 
   7006 	/*
   7007 	 * Set up some register offsets that are different between
   7008 	 * the i82542 and the i82543 and later chips.
   7009 	 */
   7010 	if (sc->sc_type < WM_T_82543)
   7011 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7012 	else
   7013 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7014 
   7015 	wm_init_tx_descs(sc, txq);
   7016 	wm_init_tx_regs(sc, wmq, txq);
   7017 	wm_init_tx_buffer(sc, txq);
   7018 
   7019 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7020 	txq->txq_sending = false;
   7021 }
   7022 
   7023 static void
   7024 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7025     struct wm_rxqueue *rxq)
   7026 {
   7027 
   7028 	KASSERT(mutex_owned(rxq->rxq_lock));
   7029 
   7030 	/*
   7031 	 * Initialize the receive descriptor and receive job
   7032 	 * descriptor rings.
   7033 	 */
   7034 	if (sc->sc_type < WM_T_82543) {
   7035 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7036 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7037 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7038 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7039 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7040 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7041 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7042 
   7043 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7044 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7045 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7046 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7047 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7048 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7049 	} else {
   7050 		int qid = wmq->wmq_id;
   7051 
   7052 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7053 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7054 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7055 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7056 
   7057 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7058 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7059 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7060 
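			/*
			 * SRRCTL.BSIZEPKT counts buffer size in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
			 * granularity check above and the shift below.
			 */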
    7061 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7062 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7063 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7064 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7065 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7066 			    | RXDCTL_WTHRESH(1));
   7067 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7068 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7069 		} else {
   7070 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7071 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7072 			/* XXX should update with AIM? */
   7073 			CSR_WRITE(sc, WMREG_RDTR,
   7074 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7075 			/* MUST be same */
   7076 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7077 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7078 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7079 		}
   7080 	}
   7081 }
   7082 
   7083 static int
   7084 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7085 {
   7086 	struct wm_rxsoft *rxs;
   7087 	int error, i;
   7088 
   7089 	KASSERT(mutex_owned(rxq->rxq_lock));
   7090 
   7091 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7092 		rxs = &rxq->rxq_soft[i];
   7093 		if (rxs->rxs_mbuf == NULL) {
   7094 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7095 				log(LOG_ERR, "%s: unable to allocate or map "
   7096 				    "rx buffer %d, error = %d\n",
   7097 				    device_xname(sc->sc_dev), i, error);
   7098 				/*
   7099 				 * XXX Should attempt to run with fewer receive
   7100 				 * XXX buffers instead of just failing.
   7101 				 */
   7102 				wm_rxdrain(rxq);
   7103 				return ENOMEM;
   7104 			}
   7105 		} else {
   7106 			/*
   7107 			 * For 82575 and 82576, the RX descriptors must be
   7108 			 * initialized after the setting of RCTL.EN in
   7109 			 * wm_set_filter()
   7110 			 */
   7111 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7112 				wm_init_rxdesc(rxq, i);
   7113 		}
   7114 	}
   7115 	rxq->rxq_ptr = 0;
   7116 	rxq->rxq_discard = 0;
   7117 	WM_RXCHAIN_RESET(rxq);
   7118 
   7119 	return 0;
   7120 }
   7121 
   7122 static int
   7123 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7124     struct wm_rxqueue *rxq)
   7125 {
   7126 
   7127 	KASSERT(mutex_owned(rxq->rxq_lock));
   7128 
   7129 	/*
   7130 	 * Set up some register offsets that are different between
   7131 	 * the i82542 and the i82543 and later chips.
   7132 	 */
   7133 	if (sc->sc_type < WM_T_82543)
   7134 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7135 	else
   7136 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7137 
   7138 	wm_init_rx_regs(sc, wmq, rxq);
   7139 	return wm_init_rx_buffer(sc, rxq);
   7140 }
   7141 
   7142 /*
    7143  * wm_init_txrx_queues:
   7144  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7145  */
   7146 static int
   7147 wm_init_txrx_queues(struct wm_softc *sc)
   7148 {
   7149 	int i, error = 0;
   7150 
   7151 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7152 		device_xname(sc->sc_dev), __func__));
   7153 
   7154 	for (i = 0; i < sc->sc_nqueues; i++) {
   7155 		struct wm_queue *wmq = &sc->sc_queue[i];
   7156 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7157 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7158 
   7159 		/*
   7160 		 * TODO
    7161 		 * Currently, a constant interrupt interval is used instead of
    7162 		 * AIM. Furthermore, the interrupt interval of multiqueue, which
    7163 		 * uses polling mode, is lower than the default value.
   7164 		 * More tuning and AIM are required.
   7165 		 */
   7166 		if (wm_is_using_multiqueue(sc))
   7167 			wmq->wmq_itr = 50;
   7168 		else
   7169 			wmq->wmq_itr = sc->sc_itr_init;
   7170 		wmq->wmq_set_itr = true;
   7171 
   7172 		mutex_enter(txq->txq_lock);
   7173 		wm_init_tx_queue(sc, wmq, txq);
   7174 		mutex_exit(txq->txq_lock);
   7175 
   7176 		mutex_enter(rxq->rxq_lock);
   7177 		error = wm_init_rx_queue(sc, wmq, rxq);
   7178 		mutex_exit(rxq->rxq_lock);
   7179 		if (error)
   7180 			break;
   7181 	}
   7182 
   7183 	return error;
   7184 }
   7185 
   7186 /*
   7187  * wm_tx_offload:
   7188  *
   7189  *	Set up TCP/IP checksumming parameters for the
   7190  *	specified packet.
   7191  */
   7192 static int
   7193 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7194     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7195 {
   7196 	struct mbuf *m0 = txs->txs_mbuf;
   7197 	struct livengood_tcpip_ctxdesc *t;
   7198 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7199 	uint32_t ipcse;
   7200 	struct ether_header *eh;
   7201 	int offset, iphl;
   7202 	uint8_t fields;
   7203 
   7204 	/*
   7205 	 * XXX It would be nice if the mbuf pkthdr had offset
   7206 	 * fields for the protocol headers.
   7207 	 */
   7208 
   7209 	eh = mtod(m0, struct ether_header *);
   7210 	switch (htons(eh->ether_type)) {
   7211 	case ETHERTYPE_IP:
   7212 	case ETHERTYPE_IPV6:
   7213 		offset = ETHER_HDR_LEN;
   7214 		break;
   7215 
   7216 	case ETHERTYPE_VLAN:
   7217 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7218 		break;
   7219 
   7220 	default:
   7221 		/*
   7222 		 * Don't support this protocol or encapsulation.
   7223 		 */
   7224 		*fieldsp = 0;
   7225 		*cmdp = 0;
   7226 		return 0;
   7227 	}
   7228 
   7229 	if ((m0->m_pkthdr.csum_flags &
   7230 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7231 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7232 	} else
   7233 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7234 
   7235 	ipcse = offset + iphl - 1;
   7236 
   7237 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7238 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7239 	seg = 0;
   7240 	fields = 0;
   7241 
   7242 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7243 		int hlen = offset + iphl;
   7244 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7245 
   7246 		if (__predict_false(m0->m_len <
   7247 				    (hlen + sizeof(struct tcphdr)))) {
   7248 			/*
   7249 			 * TCP/IP headers are not in the first mbuf; we need
   7250 			 * to do this the slow and painful way. Let's just
   7251 			 * hope this doesn't happen very often.
   7252 			 */
   7253 			struct tcphdr th;
   7254 
   7255 			WM_Q_EVCNT_INCR(txq, tsopain);
   7256 
   7257 			m_copydata(m0, hlen, sizeof(th), &th);
   7258 			if (v4) {
   7259 				struct ip ip;
   7260 
   7261 				m_copydata(m0, offset, sizeof(ip), &ip);
   7262 				ip.ip_len = 0;
   7263 				m_copyback(m0,
   7264 				    offset + offsetof(struct ip, ip_len),
   7265 				    sizeof(ip.ip_len), &ip.ip_len);
   7266 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7267 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7268 			} else {
   7269 				struct ip6_hdr ip6;
   7270 
   7271 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7272 				ip6.ip6_plen = 0;
   7273 				m_copyback(m0,
   7274 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7275 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7276 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7277 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7278 			}
   7279 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7280 			    sizeof(th.th_sum), &th.th_sum);
   7281 
   7282 			hlen += th.th_off << 2;
   7283 		} else {
   7284 			/*
   7285 			 * TCP/IP headers are in the first mbuf; we can do
   7286 			 * this the easy way.
   7287 			 */
   7288 			struct tcphdr *th;
   7289 
   7290 			if (v4) {
   7291 				struct ip *ip =
   7292 				    (void *)(mtod(m0, char *) + offset);
   7293 				th = (void *)(mtod(m0, char *) + hlen);
   7294 
   7295 				ip->ip_len = 0;
   7296 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7297 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7298 			} else {
   7299 				struct ip6_hdr *ip6 =
   7300 				    (void *)(mtod(m0, char *) + offset);
   7301 				th = (void *)(mtod(m0, char *) + hlen);
   7302 
   7303 				ip6->ip6_plen = 0;
   7304 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7305 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7306 			}
   7307 			hlen += th->th_off << 2;
   7308 		}
   7309 
   7310 		if (v4) {
   7311 			WM_Q_EVCNT_INCR(txq, tso);
   7312 			cmdlen |= WTX_TCPIP_CMD_IP;
   7313 		} else {
   7314 			WM_Q_EVCNT_INCR(txq, tso6);
   7315 			ipcse = 0;
   7316 		}
   7317 		cmd |= WTX_TCPIP_CMD_TSE;
   7318 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7319 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7320 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7321 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7322 	}
   7323 
   7324 	/*
   7325 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7326 	 * offload feature, if we load the context descriptor, we
   7327 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7328 	 */
   7329 
   7330 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7331 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7332 	    WTX_TCPIP_IPCSE(ipcse);
   7333 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7334 		WM_Q_EVCNT_INCR(txq, ipsum);
   7335 		fields |= WTX_IXSM;
   7336 	}
   7337 
   7338 	offset += iphl;
   7339 
   7340 	if (m0->m_pkthdr.csum_flags &
   7341 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7342 		WM_Q_EVCNT_INCR(txq, tusum);
   7343 		fields |= WTX_TXSM;
   7344 		tucs = WTX_TCPIP_TUCSS(offset) |
   7345 		    WTX_TCPIP_TUCSO(offset +
   7346 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7347 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7348 	} else if ((m0->m_pkthdr.csum_flags &
   7349 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7350 		WM_Q_EVCNT_INCR(txq, tusum6);
   7351 		fields |= WTX_TXSM;
   7352 		tucs = WTX_TCPIP_TUCSS(offset) |
   7353 		    WTX_TCPIP_TUCSO(offset +
   7354 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7355 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7356 	} else {
   7357 		/* Just initialize it to a valid TCP context. */
   7358 		tucs = WTX_TCPIP_TUCSS(offset) |
   7359 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7360 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7361 	}
   7362 
   7363 	/*
    7364 	 * We don't have to write a context descriptor for every packet,
    7365 	 * except on the 82574: there, a context descriptor must be written
    7366 	 * for every packet when two descriptor queues are in use.
    7367 	 * Writing a context descriptor for every packet adds overhead,
    7368 	 * but it does not cause problems.
   7369 	 */
   7370 	/* Fill in the context descriptor. */
   7371 	t = (struct livengood_tcpip_ctxdesc *)
   7372 	    &txq->txq_descs[txq->txq_next];
   7373 	t->tcpip_ipcs = htole32(ipcs);
   7374 	t->tcpip_tucs = htole32(tucs);
   7375 	t->tcpip_cmdlen = htole32(cmdlen);
   7376 	t->tcpip_seg = htole32(seg);
   7377 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7378 
   7379 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7380 	txs->txs_ndesc++;
   7381 
   7382 	*cmdp = cmd;
   7383 	*fieldsp = fields;
   7384 
   7385 	return 0;
   7386 }
   7387 
   7388 static inline int
   7389 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7390 {
   7391 	struct wm_softc *sc = ifp->if_softc;
   7392 	u_int cpuid = cpu_index(curcpu());
   7393 
   7394 	/*
    7395 	 * Currently, a simple distribution strategy.
    7396 	 * TODO:
    7397 	 * distribute by flowid (RSS hash value).
   7398 	 */
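	/*
	 * E.g. with ncpu = 8, sc_nqueues = 4 and sc_affinity_offset = 1
	 * (illustrative values only), cpuid 5 maps to
	 * ((5 + 8 - 1) % 8) % 4 = 0.
	 */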
   7399 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7400 }
   7401 
   7402 /*
   7403  * wm_start:		[ifnet interface function]
   7404  *
   7405  *	Start packet transmission on the interface.
   7406  */
   7407 static void
   7408 wm_start(struct ifnet *ifp)
   7409 {
   7410 	struct wm_softc *sc = ifp->if_softc;
   7411 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7412 
   7413 #ifdef WM_MPSAFE
   7414 	KASSERT(if_is_mpsafe(ifp));
   7415 #endif
   7416 	/*
   7417 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7418 	 */
   7419 
   7420 	mutex_enter(txq->txq_lock);
   7421 	if (!txq->txq_stopping)
   7422 		wm_start_locked(ifp);
   7423 	mutex_exit(txq->txq_lock);
   7424 }
   7425 
   7426 static void
   7427 wm_start_locked(struct ifnet *ifp)
   7428 {
   7429 	struct wm_softc *sc = ifp->if_softc;
   7430 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7431 
   7432 	wm_send_common_locked(ifp, txq, false);
   7433 }
   7434 
   7435 static int
   7436 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7437 {
   7438 	int qid;
   7439 	struct wm_softc *sc = ifp->if_softc;
   7440 	struct wm_txqueue *txq;
   7441 
   7442 	qid = wm_select_txqueue(ifp, m);
   7443 	txq = &sc->sc_queue[qid].wmq_txq;
   7444 
   7445 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7446 		m_freem(m);
   7447 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7448 		return ENOBUFS;
   7449 	}
   7450 
   7451 	/*
   7452 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7453 	 */
   7454 	ifp->if_obytes += m->m_pkthdr.len;
   7455 	if (m->m_flags & M_MCAST)
   7456 		ifp->if_omcasts++;
   7457 
   7458 	if (mutex_tryenter(txq->txq_lock)) {
   7459 		if (!txq->txq_stopping)
   7460 			wm_transmit_locked(ifp, txq);
   7461 		mutex_exit(txq->txq_lock);
   7462 	}
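	/*
	 * If the lock is contended, the packet simply stays on
	 * txq_interq; it is picked up by whoever next runs the queue
	 * with the lock held (e.g. Tx completion processing).
	 */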
   7463 
   7464 	return 0;
   7465 }
   7466 
   7467 static void
   7468 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7469 {
   7470 
   7471 	wm_send_common_locked(ifp, txq, true);
   7472 }
   7473 
   7474 static void
   7475 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7476     bool is_transmit)
   7477 {
   7478 	struct wm_softc *sc = ifp->if_softc;
   7479 	struct mbuf *m0;
   7480 	struct wm_txsoft *txs;
   7481 	bus_dmamap_t dmamap;
   7482 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7483 	bus_addr_t curaddr;
   7484 	bus_size_t seglen, curlen;
   7485 	uint32_t cksumcmd;
   7486 	uint8_t cksumfields;
   7487 	bool remap = true;
   7488 
   7489 	KASSERT(mutex_owned(txq->txq_lock));
   7490 
   7491 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7492 		return;
   7493 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7494 		return;
   7495 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7496 		return;
   7497 
   7498 	/* Remember the previous number of free descriptors. */
   7499 	ofree = txq->txq_free;
   7500 
   7501 	/*
   7502 	 * Loop through the send queue, setting up transmit descriptors
   7503 	 * until we drain the queue, or use up all available transmit
   7504 	 * descriptors.
   7505 	 */
   7506 	for (;;) {
   7507 		m0 = NULL;
   7508 
   7509 		/* Get a work queue entry. */
   7510 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7511 			wm_txeof(txq, UINT_MAX);
   7512 			if (txq->txq_sfree == 0) {
   7513 				DPRINTF(WM_DEBUG_TX,
   7514 				    ("%s: TX: no free job descriptors\n",
   7515 					device_xname(sc->sc_dev)));
   7516 				WM_Q_EVCNT_INCR(txq, txsstall);
   7517 				break;
   7518 			}
   7519 		}
   7520 
   7521 		/* Grab a packet off the queue. */
   7522 		if (is_transmit)
   7523 			m0 = pcq_get(txq->txq_interq);
   7524 		else
   7525 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7526 		if (m0 == NULL)
   7527 			break;
   7528 
   7529 		DPRINTF(WM_DEBUG_TX,
   7530 		    ("%s: TX: have packet to transmit: %p\n",
   7531 			device_xname(sc->sc_dev), m0));
   7532 
   7533 		txs = &txq->txq_soft[txq->txq_snext];
   7534 		dmamap = txs->txs_dmamap;
   7535 
   7536 		use_tso = (m0->m_pkthdr.csum_flags &
   7537 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7538 
   7539 		/*
   7540 		 * So says the Linux driver:
   7541 		 * The controller does a simple calculation to make sure
   7542 		 * there is enough room in the FIFO before initiating the
   7543 		 * DMA for each buffer. The calc is:
   7544 		 *	4 = ceil(buffer len / MSS)
   7545 		 * To make sure we don't overrun the FIFO, adjust the max
   7546 		 * buffer len if the MSS drops.
   7547 		 */
   7548 		dmamap->dm_maxsegsz =
   7549 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7550 		    ? m0->m_pkthdr.segsz << 2
   7551 		    : WTX_MAX_LEN;
   7552 
   7553 		/*
   7554 		 * Load the DMA map.  If this fails, the packet either
   7555 		 * didn't fit in the allotted number of segments, or we
   7556 		 * were short on resources.  For the too-many-segments
   7557 		 * case, we simply report an error and drop the packet,
   7558 		 * since we can't sanely copy a jumbo packet to a single
   7559 		 * buffer.
   7560 		 */
   7561 retry:
   7562 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7563 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7564 		if (__predict_false(error)) {
   7565 			if (error == EFBIG) {
   7566 				if (remap == true) {
   7567 					struct mbuf *m;
   7568 
   7569 					remap = false;
   7570 					m = m_defrag(m0, M_NOWAIT);
   7571 					if (m != NULL) {
   7572 						WM_Q_EVCNT_INCR(txq, defrag);
   7573 						m0 = m;
   7574 						goto retry;
   7575 					}
   7576 				}
   7577 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7578 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7579 				    "DMA segments, dropping...\n",
   7580 				    device_xname(sc->sc_dev));
   7581 				wm_dump_mbuf_chain(sc, m0);
   7582 				m_freem(m0);
   7583 				continue;
   7584 			}
   7585 			/*  Short on resources, just stop for now. */
   7586 			DPRINTF(WM_DEBUG_TX,
   7587 			    ("%s: TX: dmamap load failed: %d\n",
   7588 				device_xname(sc->sc_dev), error));
   7589 			break;
   7590 		}
   7591 
   7592 		segs_needed = dmamap->dm_nsegs;
   7593 		if (use_tso) {
   7594 			/* For sentinel descriptor; see below. */
   7595 			segs_needed++;
   7596 		}
   7597 
   7598 		/*
   7599 		 * Ensure we have enough descriptors free to describe
   7600 		 * the packet. Note, we always reserve one descriptor
   7601 		 * at the end of the ring due to the semantics of the
   7602 		 * TDT register, plus one more in the event we need
   7603 		 * to load offload context.
   7604 		 */
   7605 		if (segs_needed > txq->txq_free - 2) {
   7606 			/*
   7607 			 * Not enough free descriptors to transmit this
   7608 			 * packet.  We haven't committed anything yet,
   7609 			 * so just unload the DMA map, put the packet
    7610 			 * back on the queue, and punt. Notify the upper
   7611 			 * layer that there are no more slots left.
   7612 			 */
   7613 			DPRINTF(WM_DEBUG_TX,
   7614 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7615 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7616 				segs_needed, txq->txq_free - 1));
   7617 			if (!is_transmit)
   7618 				ifp->if_flags |= IFF_OACTIVE;
   7619 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7620 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7621 			WM_Q_EVCNT_INCR(txq, txdstall);
   7622 			break;
   7623 		}
   7624 
   7625 		/*
   7626 		 * Check for 82547 Tx FIFO bug. We need to do this
   7627 		 * once we know we can transmit the packet, since we
   7628 		 * do some internal FIFO space accounting here.
   7629 		 */
   7630 		if (sc->sc_type == WM_T_82547 &&
   7631 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7632 			DPRINTF(WM_DEBUG_TX,
   7633 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7634 				device_xname(sc->sc_dev)));
   7635 			if (!is_transmit)
   7636 				ifp->if_flags |= IFF_OACTIVE;
   7637 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7638 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7639 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7640 			break;
   7641 		}
   7642 
   7643 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7644 
   7645 		DPRINTF(WM_DEBUG_TX,
   7646 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7647 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7648 
   7649 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7650 
   7651 		/*
   7652 		 * Store a pointer to the packet so that we can free it
   7653 		 * later.
   7654 		 *
   7655 		 * Initially, we consider the number of descriptors the
   7656 		 * packet uses the number of DMA segments.  This may be
   7657 		 * incremented by 1 if we do checksum offload (a descriptor
   7658 		 * is used to set the checksum context).
   7659 		 */
   7660 		txs->txs_mbuf = m0;
   7661 		txs->txs_firstdesc = txq->txq_next;
   7662 		txs->txs_ndesc = segs_needed;
   7663 
   7664 		/* Set up offload parameters for this packet. */
   7665 		if (m0->m_pkthdr.csum_flags &
   7666 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7667 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7668 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7669 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7670 					  &cksumfields) != 0) {
   7671 				/* Error message already displayed. */
   7672 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7673 				continue;
   7674 			}
   7675 		} else {
   7676 			cksumcmd = 0;
   7677 			cksumfields = 0;
   7678 		}
   7679 
   7680 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7681 
   7682 		/* Sync the DMA map. */
   7683 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7684 		    BUS_DMASYNC_PREWRITE);
   7685 
   7686 		/* Initialize the transmit descriptor. */
   7687 		for (nexttx = txq->txq_next, seg = 0;
   7688 		     seg < dmamap->dm_nsegs; seg++) {
   7689 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7690 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7691 			     seglen != 0;
   7692 			     curaddr += curlen, seglen -= curlen,
   7693 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7694 				curlen = seglen;
   7695 
   7696 				/*
   7697 				 * So says the Linux driver:
   7698 				 * Work around for premature descriptor
   7699 				 * write-backs in TSO mode.  Append a
   7700 				 * 4-byte sentinel descriptor.
   7701 				 */
   7702 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7703 				    curlen > 8)
   7704 					curlen -= 4;
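				/*
				 * The 4 bytes trimmed here are emitted
				 * as a separate descriptor on the next
				 * pass of this loop; that is the extra
				 * "sentinel" counted in segs_needed.
				 */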
   7705 
   7706 				wm_set_dma_addr(
   7707 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7708 				txq->txq_descs[nexttx].wtx_cmdlen
   7709 				    = htole32(cksumcmd | curlen);
   7710 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7711 				    = 0;
   7712 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7713 				    = cksumfields;
    7714 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7715 				lasttx = nexttx;
   7716 
   7717 				DPRINTF(WM_DEBUG_TX,
   7718 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7719 					"len %#04zx\n",
   7720 					device_xname(sc->sc_dev), nexttx,
   7721 					(uint64_t)curaddr, curlen));
   7722 			}
   7723 		}
   7724 
   7725 		KASSERT(lasttx != -1);
   7726 
   7727 		/*
   7728 		 * Set up the command byte on the last descriptor of
   7729 		 * the packet. If we're in the interrupt delay window,
   7730 		 * delay the interrupt.
   7731 		 */
   7732 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7733 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7734 
   7735 		/*
   7736 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7737 		 * up the descriptor to encapsulate the packet for us.
   7738 		 *
   7739 		 * This is only valid on the last descriptor of the packet.
   7740 		 */
   7741 		if (vlan_has_tag(m0)) {
   7742 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7743 			    htole32(WTX_CMD_VLE);
   7744 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7745 			    = htole16(vlan_get_tag(m0));
   7746 		}
   7747 
   7748 		txs->txs_lastdesc = lasttx;
   7749 
   7750 		DPRINTF(WM_DEBUG_TX,
   7751 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7752 			device_xname(sc->sc_dev),
   7753 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7754 
   7755 		/* Sync the descriptors we're using. */
   7756 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7757 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7758 
   7759 		/* Give the packet to the chip. */
   7760 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7761 
   7762 		DPRINTF(WM_DEBUG_TX,
   7763 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7764 
   7765 		DPRINTF(WM_DEBUG_TX,
   7766 		    ("%s: TX: finished transmitting packet, job %d\n",
   7767 			device_xname(sc->sc_dev), txq->txq_snext));
   7768 
   7769 		/* Advance the tx pointer. */
   7770 		txq->txq_free -= txs->txs_ndesc;
   7771 		txq->txq_next = nexttx;
   7772 
   7773 		txq->txq_sfree--;
   7774 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7775 
   7776 		/* Pass the packet to any BPF listeners. */
   7777 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7778 	}
   7779 
   7780 	if (m0 != NULL) {
   7781 		if (!is_transmit)
   7782 			ifp->if_flags |= IFF_OACTIVE;
   7783 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7784 		WM_Q_EVCNT_INCR(txq, descdrop);
   7785 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7786 			__func__));
   7787 		m_freem(m0);
   7788 	}
   7789 
   7790 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7791 		/* No more slots; notify upper layer. */
   7792 		if (!is_transmit)
   7793 			ifp->if_flags |= IFF_OACTIVE;
   7794 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7795 	}
   7796 
   7797 	if (txq->txq_free != ofree) {
   7798 		/* Set a watchdog timer in case the chip flakes out. */
   7799 		txq->txq_lastsent = time_uptime;
   7800 		txq->txq_sending = true;
   7801 	}
   7802 }
   7803 
   7804 /*
   7805  * wm_nq_tx_offload:
   7806  *
   7807  *	Set up TCP/IP checksumming parameters for the
   7808  *	specified packet, for NEWQUEUE devices
   7809  */
   7810 static int
   7811 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7812     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7813 {
   7814 	struct mbuf *m0 = txs->txs_mbuf;
   7815 	uint32_t vl_len, mssidx, cmdc;
   7816 	struct ether_header *eh;
   7817 	int offset, iphl;
   7818 
   7819 	/*
   7820 	 * XXX It would be nice if the mbuf pkthdr had offset
   7821 	 * fields for the protocol headers.
   7822 	 */
   7823 	*cmdlenp = 0;
   7824 	*fieldsp = 0;
   7825 
   7826 	eh = mtod(m0, struct ether_header *);
   7827 	switch (htons(eh->ether_type)) {
   7828 	case ETHERTYPE_IP:
   7829 	case ETHERTYPE_IPV6:
   7830 		offset = ETHER_HDR_LEN;
   7831 		break;
   7832 
   7833 	case ETHERTYPE_VLAN:
   7834 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7835 		break;
   7836 
   7837 	default:
   7838 		/* Don't support this protocol or encapsulation. */
   7839 		*do_csum = false;
   7840 		return 0;
   7841 	}
   7842 	*do_csum = true;
   7843 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7844 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7845 
   7846 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7847 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7848 
   7849 	if ((m0->m_pkthdr.csum_flags &
   7850 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7851 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7852 	} else {
   7853 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7854 	}
   7855 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7856 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7857 
   7858 	if (vlan_has_tag(m0)) {
   7859 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7860 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7861 		*cmdlenp |= NQTX_CMD_VLE;
   7862 	}
   7863 
   7864 	mssidx = 0;
   7865 
   7866 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7867 		int hlen = offset + iphl;
   7868 		int tcp_hlen;
   7869 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7870 
   7871 		if (__predict_false(m0->m_len <
   7872 				    (hlen + sizeof(struct tcphdr)))) {
   7873 			/*
   7874 			 * TCP/IP headers are not in the first mbuf; we need
   7875 			 * to do this the slow and painful way. Let's just
   7876 			 * hope this doesn't happen very often.
   7877 			 */
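         			/*
         			 * Seed th_sum with the pseudo-header checksum
         			 * over the addresses and protocol only; the IP
         			 * length field is zeroed here so that the
         			 * hardware can fill in the per-segment lengths
         			 * when it splits the TSO burst.
         			 */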
   7878 			struct tcphdr th;
   7879 
   7880 			WM_Q_EVCNT_INCR(txq, tsopain);
   7881 
   7882 			m_copydata(m0, hlen, sizeof(th), &th);
   7883 			if (v4) {
   7884 				struct ip ip;
   7885 
   7886 				m_copydata(m0, offset, sizeof(ip), &ip);
   7887 				ip.ip_len = 0;
   7888 				m_copyback(m0,
   7889 				    offset + offsetof(struct ip, ip_len),
   7890 				    sizeof(ip.ip_len), &ip.ip_len);
   7891 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7892 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7893 			} else {
   7894 				struct ip6_hdr ip6;
   7895 
   7896 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7897 				ip6.ip6_plen = 0;
   7898 				m_copyback(m0,
   7899 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7900 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7901 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7902 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7903 			}
   7904 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7905 			    sizeof(th.th_sum), &th.th_sum);
   7906 
   7907 			tcp_hlen = th.th_off << 2;
   7908 		} else {
   7909 			/*
   7910 			 * TCP/IP headers are in the first mbuf; we can do
   7911 			 * this the easy way.
   7912 			 */
   7913 			struct tcphdr *th;
   7914 
   7915 			if (v4) {
   7916 				struct ip *ip =
   7917 				    (void *)(mtod(m0, char *) + offset);
   7918 				th = (void *)(mtod(m0, char *) + hlen);
   7919 
   7920 				ip->ip_len = 0;
   7921 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7922 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7923 			} else {
   7924 				struct ip6_hdr *ip6 =
   7925 				    (void *)(mtod(m0, char *) + offset);
   7926 				th = (void *)(mtod(m0, char *) + hlen);
   7927 
   7928 				ip6->ip6_plen = 0;
   7929 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7930 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7931 			}
   7932 			tcp_hlen = th->th_off << 2;
   7933 		}
   7934 		hlen += tcp_hlen;
   7935 		*cmdlenp |= NQTX_CMD_TSE;
   7936 
   7937 		if (v4) {
   7938 			WM_Q_EVCNT_INCR(txq, tso);
   7939 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7940 		} else {
   7941 			WM_Q_EVCNT_INCR(txq, tso6);
   7942 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7943 		}
   7944 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7945 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7946 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7947 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7948 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7949 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7950 	} else {
   7951 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7952 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7953 	}
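         	/*
         	 * Note that for TSO the PAYLEN field set above covers only
         	 * the L4 payload (headers excluded), while for plain checksum
         	 * offload it is the full packet length.
         	 */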
   7954 
   7955 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7956 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7957 		cmdc |= NQTXC_CMD_IP4;
   7958 	}
   7959 
   7960 	if (m0->m_pkthdr.csum_flags &
   7961 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7962 		WM_Q_EVCNT_INCR(txq, tusum);
   7963 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7964 			cmdc |= NQTXC_CMD_TCP;
   7965 		else
   7966 			cmdc |= NQTXC_CMD_UDP;
   7967 
   7968 		cmdc |= NQTXC_CMD_IP4;
   7969 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7970 	}
   7971 	if (m0->m_pkthdr.csum_flags &
   7972 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7973 		WM_Q_EVCNT_INCR(txq, tusum6);
   7974 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7975 			cmdc |= NQTXC_CMD_TCP;
   7976 		else
   7977 			cmdc |= NQTXC_CMD_UDP;
   7978 
   7979 		cmdc |= NQTXC_CMD_IP6;
   7980 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7981 	}
   7982 
    7983 	/*
    7984 	 * We don't have to write a context descriptor for every packet on
    7985 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
    7986 	 * I210 and I211; writing one per Tx queue is enough for these
    7987 	 * controllers.
    7988 	 * Writing a context descriptor for every packet adds overhead,
    7989 	 * but it does not cause problems.
    7990 	 */
   7991 	/* Fill in the context descriptor. */
   7992 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7993 	    htole32(vl_len);
   7994 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7995 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7996 	    htole32(cmdc);
   7997 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7998 	    htole32(mssidx);
   7999 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8000 	DPRINTF(WM_DEBUG_TX,
   8001 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8002 		txq->txq_next, 0, vl_len));
   8003 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8004 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8005 	txs->txs_ndesc++;
   8006 	return 0;
   8007 }
   8008 
   8009 /*
   8010  * wm_nq_start:		[ifnet interface function]
   8011  *
   8012  *	Start packet transmission on the interface for NEWQUEUE devices
   8013  */
   8014 static void
   8015 wm_nq_start(struct ifnet *ifp)
   8016 {
   8017 	struct wm_softc *sc = ifp->if_softc;
   8018 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8019 
   8020 #ifdef WM_MPSAFE
   8021 	KASSERT(if_is_mpsafe(ifp));
   8022 #endif
   8023 	/*
   8024 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8025 	 */
   8026 
   8027 	mutex_enter(txq->txq_lock);
   8028 	if (!txq->txq_stopping)
   8029 		wm_nq_start_locked(ifp);
   8030 	mutex_exit(txq->txq_lock);
   8031 }
   8032 
   8033 static void
   8034 wm_nq_start_locked(struct ifnet *ifp)
   8035 {
   8036 	struct wm_softc *sc = ifp->if_softc;
   8037 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8038 
   8039 	wm_nq_send_common_locked(ifp, txq, false);
   8040 }
   8041 
   8042 static int
   8043 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8044 {
   8045 	int qid;
   8046 	struct wm_softc *sc = ifp->if_softc;
   8047 	struct wm_txqueue *txq;
   8048 
   8049 	qid = wm_select_txqueue(ifp, m);
   8050 	txq = &sc->sc_queue[qid].wmq_txq;
   8051 
   8052 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8053 		m_freem(m);
   8054 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8055 		return ENOBUFS;
   8056 	}
   8057 
   8058 	/*
   8059 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   8060 	 */
   8061 	ifp->if_obytes += m->m_pkthdr.len;
   8062 	if (m->m_flags & M_MCAST)
   8063 		ifp->if_omcasts++;
   8064 
   8065 	/*
    8066 	 * This mutex_tryenter() can fail at run time in two situations:
    8067 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8068 	 *     (2) contention with the deferred if_start softint
    8069 	 *         (wm_handle_queue())
    8070 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8071 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8072 	 * In case (2), the last packet enqueued to txq->txq_interq is
    8073 	 * likewise dequeued by wm_deferred_start_locked(), so it does not
    8074 	 * get stuck either.
   8075 	 */
   8076 	if (mutex_tryenter(txq->txq_lock)) {
   8077 		if (!txq->txq_stopping)
   8078 			wm_nq_transmit_locked(ifp, txq);
   8079 		mutex_exit(txq->txq_lock);
   8080 	}
   8081 
   8082 	return 0;
   8083 }
   8084 
   8085 static void
   8086 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8087 {
   8088 
   8089 	wm_nq_send_common_locked(ifp, txq, true);
   8090 }
   8091 
   8092 static void
   8093 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8094     bool is_transmit)
   8095 {
   8096 	struct wm_softc *sc = ifp->if_softc;
   8097 	struct mbuf *m0;
   8098 	struct wm_txsoft *txs;
   8099 	bus_dmamap_t dmamap;
   8100 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8101 	bool do_csum, sent;
   8102 	bool remap = true;
   8103 
   8104 	KASSERT(mutex_owned(txq->txq_lock));
   8105 
   8106 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8107 		return;
   8108 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8109 		return;
   8110 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8111 		return;
   8112 
   8113 	sent = false;
   8114 
   8115 	/*
   8116 	 * Loop through the send queue, setting up transmit descriptors
   8117 	 * until we drain the queue, or use up all available transmit
   8118 	 * descriptors.
   8119 	 */
   8120 	for (;;) {
   8121 		m0 = NULL;
   8122 
   8123 		/* Get a work queue entry. */
   8124 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8125 			wm_txeof(txq, UINT_MAX);
   8126 			if (txq->txq_sfree == 0) {
   8127 				DPRINTF(WM_DEBUG_TX,
   8128 				    ("%s: TX: no free job descriptors\n",
   8129 					device_xname(sc->sc_dev)));
   8130 				WM_Q_EVCNT_INCR(txq, txsstall);
   8131 				break;
   8132 			}
   8133 		}
   8134 
   8135 		/* Grab a packet off the queue. */
   8136 		if (is_transmit)
   8137 			m0 = pcq_get(txq->txq_interq);
   8138 		else
   8139 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8140 		if (m0 == NULL)
   8141 			break;
   8142 
   8143 		DPRINTF(WM_DEBUG_TX,
   8144 		    ("%s: TX: have packet to transmit: %p\n",
   8145 		    device_xname(sc->sc_dev), m0));
   8146 
   8147 		txs = &txq->txq_soft[txq->txq_snext];
   8148 		dmamap = txs->txs_dmamap;
   8149 
   8150 		/*
   8151 		 * Load the DMA map.  If this fails, the packet either
   8152 		 * didn't fit in the allotted number of segments, or we
   8153 		 * were short on resources.  For the too-many-segments
   8154 		 * case, we simply report an error and drop the packet,
   8155 		 * since we can't sanely copy a jumbo packet to a single
   8156 		 * buffer.
   8157 		 */
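         		/*
         		 * On EFBIG, try m_defrag() once (the "remap" flag keeps
         		 * us from looping) and reload; the packet is dropped
         		 * only if it still takes too many segments.
         		 */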
   8158 retry:
   8159 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8160 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8161 		if (__predict_false(error)) {
   8162 			if (error == EFBIG) {
   8163 				if (remap == true) {
   8164 					struct mbuf *m;
   8165 
   8166 					remap = false;
   8167 					m = m_defrag(m0, M_NOWAIT);
   8168 					if (m != NULL) {
   8169 						WM_Q_EVCNT_INCR(txq, defrag);
   8170 						m0 = m;
   8171 						goto retry;
   8172 					}
   8173 				}
   8174 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8175 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8176 				    "DMA segments, dropping...\n",
   8177 				    device_xname(sc->sc_dev));
   8178 				wm_dump_mbuf_chain(sc, m0);
   8179 				m_freem(m0);
   8180 				continue;
   8181 			}
   8182 			/* Short on resources, just stop for now. */
   8183 			DPRINTF(WM_DEBUG_TX,
   8184 			    ("%s: TX: dmamap load failed: %d\n",
   8185 				device_xname(sc->sc_dev), error));
   8186 			break;
   8187 		}
   8188 
   8189 		segs_needed = dmamap->dm_nsegs;
   8190 
   8191 		/*
   8192 		 * Ensure we have enough descriptors free to describe
   8193 		 * the packet. Note, we always reserve one descriptor
   8194 		 * at the end of the ring due to the semantics of the
   8195 		 * TDT register, plus one more in the event we need
   8196 		 * to load offload context.
   8197 		 */
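         		/*
         		 * For example, a packet mapped to 3 DMA segments needs
         		 * 3 data descriptors; with the reserved TDT slot and a
         		 * possible context descriptor, txq_free must be >= 5,
         		 * which is what the "segs_needed > txq_free - 2" test
         		 * below enforces.
         		 */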
   8198 		if (segs_needed > txq->txq_free - 2) {
   8199 			/*
   8200 			 * Not enough free descriptors to transmit this
   8201 			 * packet.  We haven't committed anything yet,
   8202 			 * so just unload the DMA map, put the packet
    8203 			 * back on the queue, and punt. Notify the upper
   8204 			 * layer that there are no more slots left.
   8205 			 */
   8206 			DPRINTF(WM_DEBUG_TX,
   8207 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8208 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8209 				segs_needed, txq->txq_free - 1));
   8210 			if (!is_transmit)
   8211 				ifp->if_flags |= IFF_OACTIVE;
   8212 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8213 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8214 			WM_Q_EVCNT_INCR(txq, txdstall);
   8215 			break;
   8216 		}
   8217 
   8218 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8219 
   8220 		DPRINTF(WM_DEBUG_TX,
   8221 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8222 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8223 
   8224 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8225 
   8226 		/*
   8227 		 * Store a pointer to the packet so that we can free it
   8228 		 * later.
   8229 		 *
    8230 		 * Initially, we consider the number of descriptors the
    8231 		 * packet uses to be the number of DMA segments.  This may be
   8232 		 * incremented by 1 if we do checksum offload (a descriptor
   8233 		 * is used to set the checksum context).
   8234 		 */
   8235 		txs->txs_mbuf = m0;
   8236 		txs->txs_firstdesc = txq->txq_next;
   8237 		txs->txs_ndesc = segs_needed;
   8238 
   8239 		/* Set up offload parameters for this packet. */
   8240 		uint32_t cmdlen, fields, dcmdlen;
   8241 		if (m0->m_pkthdr.csum_flags &
   8242 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8243 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8244 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8245 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8246 			    &do_csum) != 0) {
   8247 				/* Error message already displayed. */
   8248 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8249 				continue;
   8250 			}
   8251 		} else {
   8252 			do_csum = false;
   8253 			cmdlen = 0;
   8254 			fields = 0;
   8255 		}
   8256 
   8257 		/* Sync the DMA map. */
   8258 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8259 		    BUS_DMASYNC_PREWRITE);
   8260 
   8261 		/* Initialize the first transmit descriptor. */
   8262 		nexttx = txq->txq_next;
   8263 		if (!do_csum) {
    8264 			/* Set up a legacy descriptor. */
   8265 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8266 			    dmamap->dm_segs[0].ds_addr);
   8267 			txq->txq_descs[nexttx].wtx_cmdlen =
   8268 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8269 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8270 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8271 			if (vlan_has_tag(m0)) {
   8272 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8273 				    htole32(WTX_CMD_VLE);
   8274 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8275 				    htole16(vlan_get_tag(m0));
   8276 			} else
    8277 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8278 
   8279 			dcmdlen = 0;
   8280 		} else {
    8281 			/* Set up an advanced data descriptor. */
   8282 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8283 			    htole64(dmamap->dm_segs[0].ds_addr);
   8284 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8285 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8286 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8287 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8288 			    htole32(fields);
   8289 			DPRINTF(WM_DEBUG_TX,
   8290 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8291 				device_xname(sc->sc_dev), nexttx,
   8292 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8293 			DPRINTF(WM_DEBUG_TX,
   8294 			    ("\t 0x%08x%08x\n", fields,
   8295 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8296 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8297 		}
   8298 
   8299 		lasttx = nexttx;
   8300 		nexttx = WM_NEXTTX(txq, nexttx);
   8301 		/*
    8302 		 * Fill in the next descriptors. The legacy and advanced
    8303 		 * formats are the same from here on.
   8304 		 */
   8305 		for (seg = 1; seg < dmamap->dm_nsegs;
   8306 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8307 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8308 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8309 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8310 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8311 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8312 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8313 			lasttx = nexttx;
   8314 
   8315 			DPRINTF(WM_DEBUG_TX,
   8316 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8317 				device_xname(sc->sc_dev), nexttx,
   8318 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8319 				dmamap->dm_segs[seg].ds_len));
   8320 		}
   8321 
   8322 		KASSERT(lasttx != -1);
   8323 
   8324 		/*
   8325 		 * Set up the command byte on the last descriptor of
   8326 		 * the packet. If we're in the interrupt delay window,
   8327 		 * delay the interrupt.
   8328 		 */
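         		/*
         		 * EOP marks the final descriptor of the frame; RS
         		 * requests a descriptor status write-back, which is how
         		 * wm_txeof() later detects completion (WTX_ST_DD).
         		 */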
   8329 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8330 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8331 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8332 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8333 
   8334 		txs->txs_lastdesc = lasttx;
   8335 
   8336 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8337 		    device_xname(sc->sc_dev),
   8338 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8339 
   8340 		/* Sync the descriptors we're using. */
   8341 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8342 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8343 
   8344 		/* Give the packet to the chip. */
   8345 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8346 		sent = true;
   8347 
   8348 		DPRINTF(WM_DEBUG_TX,
   8349 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8350 
   8351 		DPRINTF(WM_DEBUG_TX,
   8352 		    ("%s: TX: finished transmitting packet, job %d\n",
   8353 			device_xname(sc->sc_dev), txq->txq_snext));
   8354 
   8355 		/* Advance the tx pointer. */
   8356 		txq->txq_free -= txs->txs_ndesc;
   8357 		txq->txq_next = nexttx;
   8358 
   8359 		txq->txq_sfree--;
   8360 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8361 
   8362 		/* Pass the packet to any BPF listeners. */
   8363 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8364 	}
   8365 
   8366 	if (m0 != NULL) {
   8367 		if (!is_transmit)
   8368 			ifp->if_flags |= IFF_OACTIVE;
   8369 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8370 		WM_Q_EVCNT_INCR(txq, descdrop);
   8371 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8372 			__func__));
   8373 		m_freem(m0);
   8374 	}
   8375 
   8376 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8377 		/* No more slots; notify upper layer. */
   8378 		if (!is_transmit)
   8379 			ifp->if_flags |= IFF_OACTIVE;
   8380 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8381 	}
   8382 
   8383 	if (sent) {
   8384 		/* Set a watchdog timer in case the chip flakes out. */
   8385 		txq->txq_lastsent = time_uptime;
   8386 		txq->txq_sending = true;
   8387 	}
   8388 }
   8389 
   8390 static void
   8391 wm_deferred_start_locked(struct wm_txqueue *txq)
   8392 {
   8393 	struct wm_softc *sc = txq->txq_sc;
   8394 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8395 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8396 	int qid = wmq->wmq_id;
   8397 
   8398 	KASSERT(mutex_owned(txq->txq_lock));
   8399 
    8400 	if (txq->txq_stopping) {
    8401 		/* Keep txq_lock held; our caller releases it. */
    8402 		return;
    8403 	}
   8404 
   8405 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8406 		/* XXX needed for ALTQ or single-CPU systems */
   8407 		if (qid == 0)
   8408 			wm_nq_start_locked(ifp);
   8409 		wm_nq_transmit_locked(ifp, txq);
   8410 	} else {
    8411 		/* XXX needed for ALTQ or single-CPU systems */
   8412 		if (qid == 0)
   8413 			wm_start_locked(ifp);
   8414 		wm_transmit_locked(ifp, txq);
   8415 	}
   8416 }
   8417 
   8418 /* Interrupt */
   8419 
   8420 /*
   8421  * wm_txeof:
   8422  *
   8423  *	Helper; handle transmit interrupts.
   8424  */
   8425 static bool
   8426 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8427 {
   8428 	struct wm_softc *sc = txq->txq_sc;
   8429 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8430 	struct wm_txsoft *txs;
   8431 	int count = 0;
   8432 	int i;
   8433 	uint8_t status;
   8434 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8435 	bool more = false;
   8436 
   8437 	KASSERT(mutex_owned(txq->txq_lock));
   8438 
   8439 	if (txq->txq_stopping)
   8440 		return false;
   8441 
   8442 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8443 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8444 	if (wmq->wmq_id == 0)
   8445 		ifp->if_flags &= ~IFF_OACTIVE;
   8446 
   8447 	/*
   8448 	 * Go through the Tx list and free mbufs for those
   8449 	 * frames which have been transmitted.
   8450 	 */
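         	/*
         	 * Completion is detected via the WTX_ST_DD bit that the chip
         	 * writes back (we set the RS bit on the last descriptor of
         	 * each job), so jobs are reclaimed in order from txq_sdirty.
         	 */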
   8451 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8452 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8453 		if (limit-- == 0) {
   8454 			more = true;
   8455 			DPRINTF(WM_DEBUG_TX,
   8456 			    ("%s: TX: loop limited, job %d is not processed\n",
   8457 				device_xname(sc->sc_dev), i));
   8458 			break;
   8459 		}
   8460 
   8461 		txs = &txq->txq_soft[i];
   8462 
   8463 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8464 			device_xname(sc->sc_dev), i));
   8465 
   8466 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8467 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8468 
   8469 		status =
   8470 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8471 		if ((status & WTX_ST_DD) == 0) {
   8472 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8473 			    BUS_DMASYNC_PREREAD);
   8474 			break;
   8475 		}
   8476 
   8477 		count++;
   8478 		DPRINTF(WM_DEBUG_TX,
   8479 		    ("%s: TX: job %d done: descs %d..%d\n",
   8480 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8481 		    txs->txs_lastdesc));
   8482 
   8483 		/*
   8484 		 * XXX We should probably be using the statistics
   8485 		 * XXX registers, but I don't know if they exist
   8486 		 * XXX on chips before the i82544.
   8487 		 */
   8488 
   8489 #ifdef WM_EVENT_COUNTERS
   8490 		if (status & WTX_ST_TU)
   8491 			WM_Q_EVCNT_INCR(txq, underrun);
   8492 #endif /* WM_EVENT_COUNTERS */
   8493 
    8494 		/*
    8495 		 * Documents for 82574 and newer say the status field has neither
    8496 		 * the EC (Excessive Collision) bit nor the LC (Late Collision)
    8497 		 * bit (both are reserved). Refer to the "PCIe GbE Controller Open
    8498 		 * Source Software Developer's Manual", 82574 datasheet and newer.
    8499 		 *
    8500 		 * XXX I saw the LC bit set on an I218 even though the media
    8501 		 * was full duplex, so the bit might have some other meaning
    8502 		 * (I have no documentation).
    8503 		 */
   8504 
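         		/*
         		 * (WM_T_80003 sorts after WM_T_82574 in the type enum,
         		 * but it is an older part that presumably does report
         		 * EC/LC, hence the extra check below.)
         		 */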
   8505 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8506 		    && ((sc->sc_type < WM_T_82574)
   8507 			|| (sc->sc_type == WM_T_80003))) {
   8508 			ifp->if_oerrors++;
   8509 			if (status & WTX_ST_LC)
   8510 				log(LOG_WARNING, "%s: late collision\n",
   8511 				    device_xname(sc->sc_dev));
   8512 			else if (status & WTX_ST_EC) {
   8513 				ifp->if_collisions +=
   8514 				    TX_COLLISION_THRESHOLD + 1;
   8515 				log(LOG_WARNING, "%s: excessive collisions\n",
   8516 				    device_xname(sc->sc_dev));
   8517 			}
   8518 		} else
   8519 			ifp->if_opackets++;
   8520 
   8521 		txq->txq_packets++;
   8522 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8523 
   8524 		txq->txq_free += txs->txs_ndesc;
   8525 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8526 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8527 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8528 		m_freem(txs->txs_mbuf);
   8529 		txs->txs_mbuf = NULL;
   8530 	}
   8531 
   8532 	/* Update the dirty transmit buffer pointer. */
   8533 	txq->txq_sdirty = i;
   8534 	DPRINTF(WM_DEBUG_TX,
   8535 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8536 
   8537 	if (count != 0)
   8538 		rnd_add_uint32(&sc->rnd_source, count);
   8539 
   8540 	/*
   8541 	 * If there are no more pending transmissions, cancel the watchdog
   8542 	 * timer.
   8543 	 */
   8544 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8545 		txq->txq_sending = false;
   8546 
   8547 	return more;
   8548 }
   8549 
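         /*
          * The wm_rxdesc_* helpers below hide the fact that the driver handles
          * three receive descriptor layouts: the legacy format (wrx_* fields),
          * the extended format used by the 82574 (erx_* fields) and the
          * advanced format used by the NEWQUEUE (82575 and newer) controllers
          * (nqrx_* fields). Each helper dispatches on sc_type / WM_F_NEWQUEUE.
          */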
   8550 static inline uint32_t
   8551 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8552 {
   8553 	struct wm_softc *sc = rxq->rxq_sc;
   8554 
   8555 	if (sc->sc_type == WM_T_82574)
   8556 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8557 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8558 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8559 	else
   8560 		return rxq->rxq_descs[idx].wrx_status;
   8561 }
   8562 
   8563 static inline uint32_t
   8564 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8565 {
   8566 	struct wm_softc *sc = rxq->rxq_sc;
   8567 
   8568 	if (sc->sc_type == WM_T_82574)
   8569 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8570 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8571 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8572 	else
   8573 		return rxq->rxq_descs[idx].wrx_errors;
   8574 }
   8575 
   8576 static inline uint16_t
   8577 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8578 {
   8579 	struct wm_softc *sc = rxq->rxq_sc;
   8580 
   8581 	if (sc->sc_type == WM_T_82574)
   8582 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8583 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8584 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8585 	else
   8586 		return rxq->rxq_descs[idx].wrx_special;
   8587 }
   8588 
   8589 static inline int
   8590 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8591 {
   8592 	struct wm_softc *sc = rxq->rxq_sc;
   8593 
   8594 	if (sc->sc_type == WM_T_82574)
   8595 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8596 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8597 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8598 	else
   8599 		return rxq->rxq_descs[idx].wrx_len;
   8600 }
   8601 
   8602 #ifdef WM_DEBUG
   8603 static inline uint32_t
   8604 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8605 {
   8606 	struct wm_softc *sc = rxq->rxq_sc;
   8607 
   8608 	if (sc->sc_type == WM_T_82574)
   8609 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8610 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8611 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8612 	else
   8613 		return 0;
   8614 }
   8615 
   8616 static inline uint8_t
   8617 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8618 {
   8619 	struct wm_softc *sc = rxq->rxq_sc;
   8620 
   8621 	if (sc->sc_type == WM_T_82574)
   8622 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8623 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8624 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8625 	else
   8626 		return 0;
   8627 }
   8628 #endif /* WM_DEBUG */
   8629 
   8630 static inline bool
   8631 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8632     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8633 {
   8634 
   8635 	if (sc->sc_type == WM_T_82574)
   8636 		return (status & ext_bit) != 0;
   8637 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8638 		return (status & nq_bit) != 0;
   8639 	else
   8640 		return (status & legacy_bit) != 0;
   8641 }
   8642 
   8643 static inline bool
   8644 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8645     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8646 {
   8647 
   8648 	if (sc->sc_type == WM_T_82574)
   8649 		return (error & ext_bit) != 0;
   8650 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8651 		return (error & nq_bit) != 0;
   8652 	else
   8653 		return (error & legacy_bit) != 0;
   8654 }
   8655 
   8656 static inline bool
   8657 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8658 {
   8659 
   8660 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8661 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8662 		return true;
   8663 	else
   8664 		return false;
   8665 }
   8666 
   8667 static inline bool
   8668 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8669 {
   8670 	struct wm_softc *sc = rxq->rxq_sc;
   8671 
   8672 	/* XXXX missing error bit for newqueue? */
   8673 	if (wm_rxdesc_is_set_error(sc, errors,
   8674 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8675 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8676 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8677 		NQRXC_ERROR_RXE)) {
   8678 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8679 		    EXTRXC_ERROR_SE, 0))
   8680 			log(LOG_WARNING, "%s: symbol error\n",
   8681 			    device_xname(sc->sc_dev));
   8682 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8683 		    EXTRXC_ERROR_SEQ, 0))
   8684 			log(LOG_WARNING, "%s: receive sequence error\n",
   8685 			    device_xname(sc->sc_dev));
   8686 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8687 		    EXTRXC_ERROR_CE, 0))
   8688 			log(LOG_WARNING, "%s: CRC error\n",
   8689 			    device_xname(sc->sc_dev));
   8690 		return true;
   8691 	}
   8692 
   8693 	return false;
   8694 }
   8695 
   8696 static inline bool
   8697 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8698 {
   8699 	struct wm_softc *sc = rxq->rxq_sc;
   8700 
   8701 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8702 		NQRXC_STATUS_DD)) {
   8703 		/* We have processed all of the receive descriptors. */
   8704 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8705 		return false;
   8706 	}
   8707 
   8708 	return true;
   8709 }
   8710 
   8711 static inline bool
   8712 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8713     uint16_t vlantag, struct mbuf *m)
   8714 {
   8715 
   8716 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8717 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8718 		vlan_set_tag(m, le16toh(vlantag));
   8719 	}
   8720 
   8721 	return true;
   8722 }
   8723 
   8724 static inline void
   8725 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8726     uint32_t errors, struct mbuf *m)
   8727 {
   8728 	struct wm_softc *sc = rxq->rxq_sc;
   8729 
   8730 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8731 		if (wm_rxdesc_is_set_status(sc, status,
   8732 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8733 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8734 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8735 			if (wm_rxdesc_is_set_error(sc, errors,
   8736 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8737 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8738 		}
   8739 		if (wm_rxdesc_is_set_status(sc, status,
   8740 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8741 			/*
   8742 			 * Note: we don't know if this was TCP or UDP,
   8743 			 * so we just set both bits, and expect the
   8744 			 * upper layers to deal.
   8745 			 */
   8746 			WM_Q_EVCNT_INCR(rxq, tusum);
   8747 			m->m_pkthdr.csum_flags |=
   8748 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8749 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8750 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8751 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8752 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8753 		}
   8754 	}
   8755 }
   8756 
   8757 /*
   8758  * wm_rxeof:
   8759  *
   8760  *	Helper; handle receive interrupts.
   8761  */
   8762 static bool
   8763 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8764 {
   8765 	struct wm_softc *sc = rxq->rxq_sc;
   8766 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8767 	struct wm_rxsoft *rxs;
   8768 	struct mbuf *m;
   8769 	int i, len;
   8770 	int count = 0;
   8771 	uint32_t status, errors;
   8772 	uint16_t vlantag;
   8773 	bool more = false;
   8774 
   8775 	KASSERT(mutex_owned(rxq->rxq_lock));
   8776 
   8777 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8778 		if (limit-- == 0) {
   8779 			rxq->rxq_ptr = i;
   8780 			more = true;
   8781 			DPRINTF(WM_DEBUG_RX,
   8782 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8783 				device_xname(sc->sc_dev), i));
   8784 			break;
   8785 		}
   8786 
   8787 		rxs = &rxq->rxq_soft[i];
   8788 
   8789 		DPRINTF(WM_DEBUG_RX,
   8790 		    ("%s: RX: checking descriptor %d\n",
   8791 			device_xname(sc->sc_dev), i));
   8792 		wm_cdrxsync(rxq, i,
   8793 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8794 
   8795 		status = wm_rxdesc_get_status(rxq, i);
   8796 		errors = wm_rxdesc_get_errors(rxq, i);
   8797 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8798 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8799 #ifdef WM_DEBUG
   8800 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8801 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8802 #endif
   8803 
   8804 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8805 			/*
   8806 			 * Update the receive pointer holding rxq_lock
   8807 			 * consistent with increment counter.
   8808 			 */
   8809 			rxq->rxq_ptr = i;
   8810 			break;
   8811 		}
   8812 
   8813 		count++;
   8814 		if (__predict_false(rxq->rxq_discard)) {
   8815 			DPRINTF(WM_DEBUG_RX,
   8816 			    ("%s: RX: discarding contents of descriptor %d\n",
   8817 				device_xname(sc->sc_dev), i));
   8818 			wm_init_rxdesc(rxq, i);
   8819 			if (wm_rxdesc_is_eop(rxq, status)) {
   8820 				/* Reset our state. */
   8821 				DPRINTF(WM_DEBUG_RX,
   8822 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8823 					device_xname(sc->sc_dev)));
   8824 				rxq->rxq_discard = 0;
   8825 			}
   8826 			continue;
   8827 		}
   8828 
   8829 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8830 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8831 
   8832 		m = rxs->rxs_mbuf;
   8833 
   8834 		/*
   8835 		 * Add a new receive buffer to the ring, unless of
   8836 		 * course the length is zero. Treat the latter as a
   8837 		 * failed mapping.
   8838 		 */
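         		/*
         		 * wm_add_rxbuf() replaces the mbuf we are about to pass
         		 * up the stack; on failure the old buffer is recycled
         		 * into the ring (wm_init_rxdesc() below) and the packet
         		 * is dropped.
         		 */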
   8839 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8840 			/*
   8841 			 * Failed, throw away what we've done so
   8842 			 * far, and discard the rest of the packet.
   8843 			 */
   8844 			ifp->if_ierrors++;
   8845 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8846 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8847 			wm_init_rxdesc(rxq, i);
   8848 			if (!wm_rxdesc_is_eop(rxq, status))
   8849 				rxq->rxq_discard = 1;
   8850 			if (rxq->rxq_head != NULL)
   8851 				m_freem(rxq->rxq_head);
   8852 			WM_RXCHAIN_RESET(rxq);
   8853 			DPRINTF(WM_DEBUG_RX,
   8854 			    ("%s: RX: Rx buffer allocation failed, "
   8855 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8856 				rxq->rxq_discard ? " (discard)" : ""));
   8857 			continue;
   8858 		}
   8859 
   8860 		m->m_len = len;
   8861 		rxq->rxq_len += len;
   8862 		DPRINTF(WM_DEBUG_RX,
   8863 		    ("%s: RX: buffer at %p len %d\n",
   8864 			device_xname(sc->sc_dev), m->m_data, len));
   8865 
   8866 		/* If this is not the end of the packet, keep looking. */
   8867 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8868 			WM_RXCHAIN_LINK(rxq, m);
   8869 			DPRINTF(WM_DEBUG_RX,
   8870 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8871 				device_xname(sc->sc_dev), rxq->rxq_len));
   8872 			continue;
   8873 		}
   8874 
    8875 		/*
    8876 		 * Okay, we have the entire packet now. The chip is
    8877 		 * configured to include the FCS except on I350, I354 and
    8878 		 * I21[01] (not all chips can be configured to strip it),
    8879 		 * so we need to trim it.
    8880 		 * We may need to adjust the length of the previous mbuf in
    8881 		 * the chain if the current mbuf is too short.
    8882 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    8883 		 * is always set on I350, so we don't trim there.
    8884 		 */
   8885 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8886 		    && (sc->sc_type != WM_T_I210)
   8887 		    && (sc->sc_type != WM_T_I211)) {
   8888 			if (m->m_len < ETHER_CRC_LEN) {
   8889 				rxq->rxq_tail->m_len
   8890 				    -= (ETHER_CRC_LEN - m->m_len);
   8891 				m->m_len = 0;
   8892 			} else
   8893 				m->m_len -= ETHER_CRC_LEN;
   8894 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8895 		} else
   8896 			len = rxq->rxq_len;
   8897 
   8898 		WM_RXCHAIN_LINK(rxq, m);
   8899 
   8900 		*rxq->rxq_tailp = NULL;
   8901 		m = rxq->rxq_head;
   8902 
   8903 		WM_RXCHAIN_RESET(rxq);
   8904 
   8905 		DPRINTF(WM_DEBUG_RX,
   8906 		    ("%s: RX: have entire packet, len -> %d\n",
   8907 			device_xname(sc->sc_dev), len));
   8908 
   8909 		/* If an error occurred, update stats and drop the packet. */
   8910 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8911 			m_freem(m);
   8912 			continue;
   8913 		}
   8914 
   8915 		/* No errors.  Receive the packet. */
   8916 		m_set_rcvif(m, ifp);
   8917 		m->m_pkthdr.len = len;
   8918 		/*
   8919 		 * TODO
    8920 		 * We should save the rsshash and rsstype in this mbuf.
   8921 		 */
   8922 		DPRINTF(WM_DEBUG_RX,
   8923 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8924 			device_xname(sc->sc_dev), rsstype, rsshash));
   8925 
   8926 		/*
   8927 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8928 		 * for us.  Associate the tag with the packet.
   8929 		 */
   8930 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8931 			continue;
   8932 
   8933 		/* Set up checksum info for this packet. */
   8934 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8935 		/*
   8936 		 * Update the receive pointer holding rxq_lock consistent with
   8937 		 * increment counter.
   8938 		 */
   8939 		rxq->rxq_ptr = i;
   8940 		rxq->rxq_packets++;
   8941 		rxq->rxq_bytes += len;
   8942 		mutex_exit(rxq->rxq_lock);
   8943 
   8944 		/* Pass it on. */
   8945 		if_percpuq_enqueue(sc->sc_ipq, m);
   8946 
   8947 		mutex_enter(rxq->rxq_lock);
   8948 
   8949 		if (rxq->rxq_stopping)
   8950 			break;
   8951 	}
   8952 
   8953 	if (count != 0)
   8954 		rnd_add_uint32(&sc->rnd_source, count);
   8955 
   8956 	DPRINTF(WM_DEBUG_RX,
   8957 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8958 
   8959 	return more;
   8960 }
   8961 
   8962 /*
   8963  * wm_linkintr_gmii:
   8964  *
   8965  *	Helper; handle link interrupts for GMII.
   8966  */
   8967 static void
   8968 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8969 {
   8970 	device_t dev = sc->sc_dev;
   8971 	uint32_t status, reg;
   8972 	bool link;
   8973 	int rv;
   8974 
   8975 	KASSERT(WM_CORE_LOCKED(sc));
   8976 
   8977 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8978 		__func__));
   8979 
   8980 	if ((icr & ICR_LSC) == 0) {
   8981 		if (icr & ICR_RXSEQ)
   8982 			DPRINTF(WM_DEBUG_LINK,
   8983 			    ("%s: LINK Receive sequence error\n",
   8984 				device_xname(dev)));
   8985 		return;
   8986 	}
   8987 
   8988 	/* Link status changed */
   8989 	status = CSR_READ(sc, WMREG_STATUS);
   8990 	link = status & STATUS_LU;
   8991 	if (link) {
   8992 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8993 			device_xname(dev),
   8994 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8995 	} else {
   8996 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8997 			device_xname(dev)));
   8998 	}
   8999 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9000 		wm_gig_downshift_workaround_ich8lan(sc);
   9001 
   9002 	if ((sc->sc_type == WM_T_ICH8)
   9003 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9004 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9005 	}
   9006 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9007 		device_xname(dev)));
   9008 	mii_pollstat(&sc->sc_mii);
   9009 	if (sc->sc_type == WM_T_82543) {
   9010 		int miistatus, active;
   9011 
   9012 		/*
   9013 		 * With 82543, we need to force speed and
   9014 		 * duplex on the MAC equal to what the PHY
   9015 		 * speed and duplex configuration is.
   9016 		 */
   9017 		miistatus = sc->sc_mii.mii_media_status;
   9018 
   9019 		if (miistatus & IFM_ACTIVE) {
   9020 			active = sc->sc_mii.mii_media_active;
   9021 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9022 			switch (IFM_SUBTYPE(active)) {
   9023 			case IFM_10_T:
   9024 				sc->sc_ctrl |= CTRL_SPEED_10;
   9025 				break;
   9026 			case IFM_100_TX:
   9027 				sc->sc_ctrl |= CTRL_SPEED_100;
   9028 				break;
   9029 			case IFM_1000_T:
   9030 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9031 				break;
   9032 			default:
   9033 				/*
    9034 				 * Fiber?
    9035 				 * Should not enter here.
   9036 				 */
   9037 				printf("unknown media (%x)\n", active);
   9038 				break;
   9039 			}
   9040 			if (active & IFM_FDX)
   9041 				sc->sc_ctrl |= CTRL_FD;
   9042 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9043 		}
   9044 	} else if (sc->sc_type == WM_T_PCH) {
   9045 		wm_k1_gig_workaround_hv(sc,
   9046 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9047 	}
   9048 
   9049 	/*
   9050 	 * When connected at 10Mbps half-duplex, some parts are excessively
    9051 	 * aggressive, resulting in many collisions. To avoid this, increase
   9052 	 * the IPG and reduce Rx latency in the PHY.
   9053 	 */
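         	/*
         	 * Below, IPGT is widened to 0xff for 10Mbps half-duplex, set
         	 * to 0xc on PCH_SPT and newer at 10/100 full-duplex, and
         	 * restored to the default 0x08 otherwise; the PHY's Rx latency
         	 * (EMI register) is adjusted to match.
         	 */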
   9054 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9055 	    && link) {
   9056 		uint32_t tipg_reg;
   9057 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9058 		bool fdx;
   9059 		uint16_t emi_addr, emi_val;
   9060 
   9061 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9062 		tipg_reg &= ~TIPG_IPGT_MASK;
   9063 		fdx = status & STATUS_FD;
   9064 
   9065 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9066 			tipg_reg |= 0xff;
   9067 			/* Reduce Rx latency in analog PHY */
   9068 			emi_val = 0;
   9069 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9070 		    fdx && speed != STATUS_SPEED_1000) {
   9071 			tipg_reg |= 0xc;
   9072 			emi_val = 1;
   9073 		} else {
    9074 			/* Roll back to the default values */
   9075 			tipg_reg |= 0x08;
   9076 			emi_val = 1;
   9077 		}
   9078 
   9079 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9080 
   9081 		rv = sc->phy.acquire(sc);
   9082 		if (rv)
   9083 			return;
   9084 
   9085 		if (sc->sc_type == WM_T_PCH2)
   9086 			emi_addr = I82579_RX_CONFIG;
   9087 		else
   9088 			emi_addr = I217_RX_CONFIG;
   9089 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9090 
   9091 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9092 			uint16_t phy_reg;
   9093 
   9094 			sc->phy.readreg_locked(dev, 2,
   9095 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9096 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9097 			if (speed == STATUS_SPEED_100
   9098 			    || speed == STATUS_SPEED_10)
   9099 				phy_reg |= 0x3e8;
   9100 			else
   9101 				phy_reg |= 0xfa;
   9102 			sc->phy.writereg_locked(dev, 2,
   9103 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9104 
   9105 			if (speed == STATUS_SPEED_1000) {
   9106 				sc->phy.readreg_locked(dev, 2,
   9107 				    HV_PM_CTRL, &phy_reg);
   9108 
   9109 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9110 
   9111 				sc->phy.writereg_locked(dev, 2,
   9112 				    HV_PM_CTRL, phy_reg);
   9113 			}
   9114 		}
   9115 		sc->phy.release(sc);
   9116 
   9117 		if (rv)
   9118 			return;
   9119 
   9120 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9121 			uint16_t data, ptr_gap;
   9122 
   9123 			if (speed == STATUS_SPEED_1000) {
   9124 				rv = sc->phy.acquire(sc);
   9125 				if (rv)
   9126 					return;
   9127 
   9128 				rv = sc->phy.readreg_locked(dev, 2,
   9129 				    I219_UNKNOWN1, &data);
   9130 				if (rv) {
   9131 					sc->phy.release(sc);
   9132 					return;
   9133 				}
   9134 
   9135 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9136 				if (ptr_gap < 0x18) {
   9137 					data &= ~(0x3ff << 2);
   9138 					data |= (0x18 << 2);
   9139 					rv = sc->phy.writereg_locked(dev,
   9140 					    2, I219_UNKNOWN1, data);
   9141 				}
   9142 				sc->phy.release(sc);
   9143 				if (rv)
   9144 					return;
   9145 			} else {
   9146 				rv = sc->phy.acquire(sc);
   9147 				if (rv)
   9148 					return;
   9149 
   9150 				rv = sc->phy.writereg_locked(dev, 2,
   9151 				    I219_UNKNOWN1, 0xc023);
   9152 				sc->phy.release(sc);
   9153 				if (rv)
   9154 					return;
   9155 
   9156 			}
   9157 		}
   9158 	}
   9159 
   9160 	/*
    9161 	 * I217 packet loss issue:
    9162 	 * ensure that the FEXTNVM4 Beacon Duration is set correctly
    9163 	 * on power-up.
    9164 	 * Set the Beacon Duration for I217 to 8 usec.
   9165 	 */
   9166 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9167 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9168 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9169 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9170 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9171 	}
   9172 
   9173 	/* Work-around I218 hang issue */
   9174 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9175 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9176 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9177 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9178 		wm_k1_workaround_lpt_lp(sc, link);
   9179 
   9180 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9181 		/*
   9182 		 * Set platform power management values for Latency
   9183 		 * Tolerance Reporting (LTR)
   9184 		 */
   9185 		wm_platform_pm_pch_lpt(sc,
   9186 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9187 	}
   9188 
   9189 	/* Clear link partner's EEE ability */
   9190 	sc->eee_lp_ability = 0;
   9191 
   9192 	/* FEXTNVM6 K1-off workaround */
   9193 	if (sc->sc_type == WM_T_PCH_SPT) {
   9194 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9195 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9196 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9197 		else
   9198 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9199 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9200 	}
   9201 
   9202 	if (!link)
   9203 		return;
   9204 
   9205 	switch (sc->sc_type) {
   9206 	case WM_T_PCH2:
   9207 		wm_k1_workaround_lv(sc);
   9208 		/* FALLTHROUGH */
   9209 	case WM_T_PCH:
   9210 		if (sc->sc_phytype == WMPHY_82578)
   9211 			wm_link_stall_workaround_hv(sc);
   9212 		break;
   9213 	default:
   9214 		break;
   9215 	}
   9216 
   9217 	/* Enable/Disable EEE after link up */
   9218 	if (sc->sc_phytype > WMPHY_82579)
   9219 		wm_set_eee_pchlan(sc);
   9220 }
   9221 
   9222 /*
   9223  * wm_linkintr_tbi:
   9224  *
   9225  *	Helper; handle link interrupts for TBI mode.
   9226  */
   9227 static void
   9228 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9229 {
   9230 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9231 	uint32_t status;
   9232 
   9233 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9234 		__func__));
   9235 
   9236 	status = CSR_READ(sc, WMREG_STATUS);
   9237 	if (icr & ICR_LSC) {
   9238 		wm_check_for_link(sc);
   9239 		if (status & STATUS_LU) {
   9240 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9241 				device_xname(sc->sc_dev),
   9242 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9243 			/*
   9244 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9245 			 * so we should update sc->sc_ctrl
   9246 			 */
   9247 
   9248 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9249 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9250 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9251 			if (status & STATUS_FD)
   9252 				sc->sc_tctl |=
   9253 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9254 			else
   9255 				sc->sc_tctl |=
   9256 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9257 			if (sc->sc_ctrl & CTRL_TFCE)
   9258 				sc->sc_fcrtl |= FCRTL_XONE;
   9259 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9260 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9261 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9262 			sc->sc_tbi_linkup = 1;
   9263 			if_link_state_change(ifp, LINK_STATE_UP);
   9264 		} else {
   9265 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9266 				device_xname(sc->sc_dev)));
   9267 			sc->sc_tbi_linkup = 0;
   9268 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9269 		}
   9270 		/* Update LED */
   9271 		wm_tbi_serdes_set_linkled(sc);
   9272 	} else if (icr & ICR_RXSEQ)
   9273 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9274 			device_xname(sc->sc_dev)));
   9275 }
   9276 
   9277 /*
   9278  * wm_linkintr_serdes:
   9279  *
    9280  *	Helper; handle link interrupts for SERDES mode.
   9281  */
   9282 static void
   9283 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9284 {
   9285 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9286 	struct mii_data *mii = &sc->sc_mii;
   9287 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9288 	uint32_t pcs_adv, pcs_lpab, reg;
   9289 
   9290 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9291 		__func__));
   9292 
   9293 	if (icr & ICR_LSC) {
   9294 		/* Check PCS */
   9295 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9296 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9297 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9298 				device_xname(sc->sc_dev)));
   9299 			mii->mii_media_status |= IFM_ACTIVE;
   9300 			sc->sc_tbi_linkup = 1;
   9301 			if_link_state_change(ifp, LINK_STATE_UP);
   9302 		} else {
   9303 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9304 				device_xname(sc->sc_dev)));
   9305 			mii->mii_media_status |= IFM_NONE;
   9306 			sc->sc_tbi_linkup = 0;
   9307 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9308 			wm_tbi_serdes_set_linkled(sc);
   9309 			return;
   9310 		}
   9311 		mii->mii_media_active |= IFM_1000_SX;
   9312 		if ((reg & PCS_LSTS_FDX) != 0)
   9313 			mii->mii_media_active |= IFM_FDX;
   9314 		else
   9315 			mii->mii_media_active |= IFM_HDX;
   9316 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9317 			/* Check flow */
   9318 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9319 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9320 				DPRINTF(WM_DEBUG_LINK,
   9321 				    ("XXX LINKOK but not ACOMP\n"));
   9322 				return;
   9323 			}
   9324 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9325 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9326 			DPRINTF(WM_DEBUG_LINK,
   9327 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9328 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9329 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9330 				mii->mii_media_active |= IFM_FLOW
   9331 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9332 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9333 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9334 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9335 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9336 				mii->mii_media_active |= IFM_FLOW
   9337 				    | IFM_ETH_TXPAUSE;
   9338 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9339 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9340 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9341 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9342 				mii->mii_media_active |= IFM_FLOW
   9343 				    | IFM_ETH_RXPAUSE;
   9344 		}
   9345 		/* Update LED */
   9346 		wm_tbi_serdes_set_linkled(sc);
   9347 	} else
   9348 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9349 		    device_xname(sc->sc_dev)));
   9350 }
   9351 
   9352 /*
   9353  * wm_linkintr:
   9354  *
   9355  *	Helper; handle link interrupts.
   9356  */
   9357 static void
   9358 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9359 {
   9360 
   9361 	KASSERT(WM_CORE_LOCKED(sc));
   9362 
   9363 	if (sc->sc_flags & WM_F_HAS_MII)
   9364 		wm_linkintr_gmii(sc, icr);
   9365 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9366 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9367 		wm_linkintr_serdes(sc, icr);
   9368 	else
   9369 		wm_linkintr_tbi(sc, icr);
   9370 }
   9371 
   9372 /*
   9373  * wm_intr_legacy:
   9374  *
   9375  *	Interrupt service routine for INTx and MSI.
   9376  */
   9377 static int
   9378 wm_intr_legacy(void *arg)
   9379 {
   9380 	struct wm_softc *sc = arg;
   9381 	struct wm_queue *wmq = &sc->sc_queue[0];
   9382 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9383 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9384 	uint32_t icr, rndval = 0;
   9385 	int handled = 0;
   9386 
   9387 	while (1 /* CONSTCOND */) {
   9388 		icr = CSR_READ(sc, WMREG_ICR);
   9389 		if ((icr & sc->sc_icr) == 0)
   9390 			break;
   9391 		if (handled == 0)
   9392 			DPRINTF(WM_DEBUG_TX,
    9393 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9394 		if (rndval == 0)
   9395 			rndval = icr;
   9396 
   9397 		mutex_enter(rxq->rxq_lock);
   9398 
   9399 		if (rxq->rxq_stopping) {
   9400 			mutex_exit(rxq->rxq_lock);
   9401 			break;
   9402 		}
   9403 
   9404 		handled = 1;
   9405 
   9406 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9407 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9408 			DPRINTF(WM_DEBUG_RX,
   9409 			    ("%s: RX: got Rx intr 0x%08x\n",
   9410 				device_xname(sc->sc_dev),
   9411 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9412 			WM_Q_EVCNT_INCR(rxq, intr);
   9413 		}
   9414 #endif
   9415 		/*
   9416 		 * wm_rxeof() does *not* call upper layer functions directly,
    9417 		 * because if_percpuq_enqueue() just calls softint_schedule().
   9418 		 * So, we can call wm_rxeof() in interrupt context.
   9419 		 */
   9420 		wm_rxeof(rxq, UINT_MAX);
   9421 
   9422 		mutex_exit(rxq->rxq_lock);
   9423 		mutex_enter(txq->txq_lock);
   9424 
   9425 		if (txq->txq_stopping) {
   9426 			mutex_exit(txq->txq_lock);
   9427 			break;
   9428 		}
   9429 
   9430 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9431 		if (icr & ICR_TXDW) {
   9432 			DPRINTF(WM_DEBUG_TX,
   9433 			    ("%s: TX: got TXDW interrupt\n",
   9434 				device_xname(sc->sc_dev)));
   9435 			WM_Q_EVCNT_INCR(txq, txdw);
   9436 		}
   9437 #endif
   9438 		wm_txeof(txq, UINT_MAX);
   9439 
   9440 		mutex_exit(txq->txq_lock);
   9441 		WM_CORE_LOCK(sc);
   9442 
   9443 		if (sc->sc_core_stopping) {
   9444 			WM_CORE_UNLOCK(sc);
   9445 			break;
   9446 		}
   9447 
   9448 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9449 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9450 			wm_linkintr(sc, icr);
   9451 		}
   9452 
   9453 		WM_CORE_UNLOCK(sc);
   9454 
   9455 		if (icr & ICR_RXO) {
   9456 #if defined(WM_DEBUG)
   9457 			log(LOG_WARNING, "%s: Receive overrun\n",
   9458 			    device_xname(sc->sc_dev));
   9459 #endif /* defined(WM_DEBUG) */
   9460 		}
   9461 	}
   9462 
   9463 	rnd_add_uint32(&sc->rnd_source, rndval);
   9464 
   9465 	if (handled) {
   9466 		/* Try to get more packets going. */
   9467 		softint_schedule(wmq->wmq_si);
   9468 	}
   9469 
   9470 	return handled;
   9471 }
   9472 
   9473 static inline void
   9474 wm_txrxintr_disable(struct wm_queue *wmq)
   9475 {
   9476 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9477 
   9478 	if (sc->sc_type == WM_T_82574)
   9479 		CSR_WRITE(sc, WMREG_IMC,
   9480 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9481 	else if (sc->sc_type == WM_T_82575)
   9482 		CSR_WRITE(sc, WMREG_EIMC,
   9483 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9484 	else
   9485 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9486 }
   9487 
   9488 static inline void
   9489 wm_txrxintr_enable(struct wm_queue *wmq)
   9490 {
   9491 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9492 
   9493 	wm_itrs_calculate(sc, wmq);
   9494 
   9495 	/*
    9496 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9497 	 * here. There is no need to care which of RXQ(0) and RXQ(1) enables
    9498 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
    9499 	 * its wm_handle_queue(wmq) is running.
   9500 	 */
   9501 	if (sc->sc_type == WM_T_82574)
   9502 		CSR_WRITE(sc, WMREG_IMS,
   9503 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9504 	else if (sc->sc_type == WM_T_82575)
   9505 		CSR_WRITE(sc, WMREG_EIMS,
   9506 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9507 	else
   9508 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9509 }
   9510 
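         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for a Tx/Rx queue pair for MSI-X.
          *	The queue's interrupt stays masked while work remains; it is
          *	re-enabled only when both rings have been drained within the
          *	interrupt process limits, otherwise wm_handle_queue() takes
          *	over via softint.
          */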
   9511 static int
   9512 wm_txrxintr_msix(void *arg)
   9513 {
   9514 	struct wm_queue *wmq = arg;
   9515 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9516 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9517 	struct wm_softc *sc = txq->txq_sc;
   9518 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9519 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9520 	bool txmore;
   9521 	bool rxmore;
   9522 
   9523 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9524 
   9525 	DPRINTF(WM_DEBUG_TX,
   9526 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9527 
   9528 	wm_txrxintr_disable(wmq);
   9529 
   9530 	mutex_enter(txq->txq_lock);
   9531 
   9532 	if (txq->txq_stopping) {
   9533 		mutex_exit(txq->txq_lock);
   9534 		return 0;
   9535 	}
   9536 
   9537 	WM_Q_EVCNT_INCR(txq, txdw);
   9538 	txmore = wm_txeof(txq, txlimit);
	/* wm_deferred_start() is done in wm_handle_queue(). */
   9540 	mutex_exit(txq->txq_lock);
   9541 
   9542 	DPRINTF(WM_DEBUG_RX,
   9543 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9544 	mutex_enter(rxq->rxq_lock);
   9545 
   9546 	if (rxq->rxq_stopping) {
   9547 		mutex_exit(rxq->rxq_lock);
   9548 		return 0;
   9549 	}
   9550 
   9551 	WM_Q_EVCNT_INCR(rxq, intr);
   9552 	rxmore = wm_rxeof(rxq, rxlimit);
   9553 	mutex_exit(rxq->rxq_lock);
   9554 
   9555 	wm_itrs_writereg(sc, wmq);
   9556 
   9557 	if (txmore || rxmore)
   9558 		softint_schedule(wmq->wmq_si);
   9559 	else
   9560 		wm_txrxintr_enable(wmq);
   9561 
   9562 	return 1;
   9563 }
   9564 
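/*
 * wm_handle_queue:
 *
 *	Softint handler for deferred Tx/Rx queue processing. It uses the
 * separate softint process limits and reschedules itself while work
 * remains, so the queue's hardware interrupt stays masked under load.
 */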
   9565 static void
   9566 wm_handle_queue(void *arg)
   9567 {
   9568 	struct wm_queue *wmq = arg;
   9569 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9570 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9571 	struct wm_softc *sc = txq->txq_sc;
   9572 	u_int txlimit = sc->sc_tx_process_limit;
   9573 	u_int rxlimit = sc->sc_rx_process_limit;
   9574 	bool txmore;
   9575 	bool rxmore;
   9576 
   9577 	mutex_enter(txq->txq_lock);
   9578 	if (txq->txq_stopping) {
   9579 		mutex_exit(txq->txq_lock);
   9580 		return;
   9581 	}
   9582 	txmore = wm_txeof(txq, txlimit);
   9583 	wm_deferred_start_locked(txq);
   9584 	mutex_exit(txq->txq_lock);
   9585 
   9586 	mutex_enter(rxq->rxq_lock);
   9587 	if (rxq->rxq_stopping) {
   9588 		mutex_exit(rxq->rxq_lock);
   9589 		return;
   9590 	}
   9591 	WM_Q_EVCNT_INCR(rxq, defer);
   9592 	rxmore = wm_rxeof(rxq, rxlimit);
   9593 	mutex_exit(rxq->rxq_lock);
   9594 
   9595 	if (txmore || rxmore)
   9596 		softint_schedule(wmq->wmq_si);
   9597 	else
   9598 		wm_txrxintr_enable(wmq);
   9599 }
   9600 
   9601 /*
   9602  * wm_linkintr_msix:
   9603  *
   9604  *	Interrupt service routine for link status change for MSI-X.
   9605  */
   9606 static int
   9607 wm_linkintr_msix(void *arg)
   9608 {
   9609 	struct wm_softc *sc = arg;
   9610 	uint32_t reg;
	bool has_rxo = false;
   9612 
   9613 	DPRINTF(WM_DEBUG_LINK,
   9614 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9615 
   9616 	reg = CSR_READ(sc, WMREG_ICR);
   9617 	WM_CORE_LOCK(sc);
   9618 	if (sc->sc_core_stopping)
   9619 		goto out;
   9620 
   9621 	if ((reg & ICR_LSC) != 0) {
   9622 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9623 		wm_linkintr(sc, ICR_LSC);
   9624 	}
   9625 
   9626 	/*
   9627 	 * XXX 82574 MSI-X mode workaround
   9628 	 *
	 * In MSI-X mode, the 82574 delivers a receive overrun (RXO) via the
	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
	 * interrupts by writing WMREG_ICS to process receive packets.
   9633 	 */
   9634 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9635 #if defined(WM_DEBUG)
   9636 		log(LOG_WARNING, "%s: Receive overrun\n",
   9637 		    device_xname(sc->sc_dev));
   9638 #endif /* defined(WM_DEBUG) */
   9639 
   9640 		has_rxo = true;
   9641 		/*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so use polling mode for ICR_OTHER, as is
		 * done for the Tx/Rx interrupts. ICR_OTHER will be re-enabled
		 * at the end of wm_txrxintr_msix(), which is kicked by both
		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9647 		 */
   9648 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9649 
   9650 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9651 	}
   9655 out:
   9656 	WM_CORE_UNLOCK(sc);
   9657 
   9658 	if (sc->sc_type == WM_T_82574) {
   9659 		if (!has_rxo)
   9660 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9661 		else
   9662 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9663 	} else if (sc->sc_type == WM_T_82575)
   9664 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9665 	else
   9666 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9667 
   9668 	return 1;
   9669 }
   9670 
   9671 /*
   9672  * Media related.
   9673  * GMII, SGMII, TBI (and SERDES)
   9674  */
   9675 
   9676 /* Common */
   9677 
   9678 /*
   9679  * wm_tbi_serdes_set_linkled:
   9680  *
   9681  *	Update the link LED on TBI and SERDES devices.
   9682  */
   9683 static void
   9684 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9685 {
   9686 
   9687 	if (sc->sc_tbi_linkup)
   9688 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9689 	else
   9690 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9691 
   9692 	/* 82540 or newer devices are active low */
   9693 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9694 
   9695 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9696 }
   9697 
   9698 /* GMII related */
   9699 
   9700 /*
   9701  * wm_gmii_reset:
   9702  *
   9703  *	Reset the PHY.
   9704  */
   9705 static void
   9706 wm_gmii_reset(struct wm_softc *sc)
   9707 {
   9708 	uint32_t reg;
   9709 	int rv;
   9710 
   9711 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9712 		device_xname(sc->sc_dev), __func__));
   9713 
   9714 	rv = sc->phy.acquire(sc);
   9715 	if (rv != 0) {
   9716 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9717 		    __func__);
   9718 		return;
   9719 	}
   9720 
   9721 	switch (sc->sc_type) {
   9722 	case WM_T_82542_2_0:
   9723 	case WM_T_82542_2_1:
   9724 		/* null */
   9725 		break;
   9726 	case WM_T_82543:
   9727 		/*
   9728 		 * With 82543, we need to force speed and duplex on the MAC
   9729 		 * equal to what the PHY speed and duplex configuration is.
   9730 		 * In addition, we need to perform a hardware reset on the PHY
   9731 		 * to take it out of reset.
   9732 		 */
   9733 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9734 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9735 
   9736 		/* The PHY reset pin is active-low. */
   9737 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9738 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9739 		    CTRL_EXT_SWDPIN(4));
   9740 		reg |= CTRL_EXT_SWDPIO(4);
   9741 
   9742 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9743 		CSR_WRITE_FLUSH(sc);
   9744 		delay(10*1000);
   9745 
   9746 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9747 		CSR_WRITE_FLUSH(sc);
   9748 		delay(150);
   9749 #if 0
   9750 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9751 #endif
   9752 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9753 		break;
   9754 	case WM_T_82544:	/* reset 10000us */
   9755 	case WM_T_82540:
   9756 	case WM_T_82545:
   9757 	case WM_T_82545_3:
   9758 	case WM_T_82546:
   9759 	case WM_T_82546_3:
   9760 	case WM_T_82541:
   9761 	case WM_T_82541_2:
   9762 	case WM_T_82547:
   9763 	case WM_T_82547_2:
   9764 	case WM_T_82571:	/* reset 100us */
   9765 	case WM_T_82572:
   9766 	case WM_T_82573:
   9767 	case WM_T_82574:
   9768 	case WM_T_82575:
   9769 	case WM_T_82576:
   9770 	case WM_T_82580:
   9771 	case WM_T_I350:
   9772 	case WM_T_I354:
   9773 	case WM_T_I210:
   9774 	case WM_T_I211:
   9775 	case WM_T_82583:
   9776 	case WM_T_80003:
   9777 		/* generic reset */
   9778 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9779 		CSR_WRITE_FLUSH(sc);
   9780 		delay(20000);
   9781 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9782 		CSR_WRITE_FLUSH(sc);
   9783 		delay(20000);
   9784 
   9785 		if ((sc->sc_type == WM_T_82541)
   9786 		    || (sc->sc_type == WM_T_82541_2)
   9787 		    || (sc->sc_type == WM_T_82547)
   9788 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   9790 			/* XXX add code to set LED after phy reset */
   9791 		}
   9792 		break;
   9793 	case WM_T_ICH8:
   9794 	case WM_T_ICH9:
   9795 	case WM_T_ICH10:
   9796 	case WM_T_PCH:
   9797 	case WM_T_PCH2:
   9798 	case WM_T_PCH_LPT:
   9799 	case WM_T_PCH_SPT:
   9800 	case WM_T_PCH_CNP:
   9801 		/* generic reset */
   9802 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9803 		CSR_WRITE_FLUSH(sc);
   9804 		delay(100);
   9805 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9806 		CSR_WRITE_FLUSH(sc);
   9807 		delay(150);
   9808 		break;
   9809 	default:
   9810 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9811 		    __func__);
   9812 		break;
   9813 	}
   9814 
   9815 	sc->phy.release(sc);
   9816 
   9817 	/* get_cfg_done */
   9818 	wm_get_cfg_done(sc);
   9819 
   9820 	/* extra setup */
   9821 	switch (sc->sc_type) {
   9822 	case WM_T_82542_2_0:
   9823 	case WM_T_82542_2_1:
   9824 	case WM_T_82543:
   9825 	case WM_T_82544:
   9826 	case WM_T_82540:
   9827 	case WM_T_82545:
   9828 	case WM_T_82545_3:
   9829 	case WM_T_82546:
   9830 	case WM_T_82546_3:
   9831 	case WM_T_82541_2:
   9832 	case WM_T_82547_2:
   9833 	case WM_T_82571:
   9834 	case WM_T_82572:
   9835 	case WM_T_82573:
   9836 	case WM_T_82574:
   9837 	case WM_T_82583:
   9838 	case WM_T_82575:
   9839 	case WM_T_82576:
   9840 	case WM_T_82580:
   9841 	case WM_T_I350:
   9842 	case WM_T_I354:
   9843 	case WM_T_I210:
   9844 	case WM_T_I211:
   9845 	case WM_T_80003:
   9846 		/* null */
   9847 		break;
   9848 	case WM_T_82541:
   9849 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   9851 		break;
   9852 	case WM_T_ICH8:
   9853 	case WM_T_ICH9:
   9854 	case WM_T_ICH10:
   9855 	case WM_T_PCH:
   9856 	case WM_T_PCH2:
   9857 	case WM_T_PCH_LPT:
   9858 	case WM_T_PCH_SPT:
   9859 	case WM_T_PCH_CNP:
   9860 		wm_phy_post_reset(sc);
   9861 		break;
   9862 	default:
   9863 		panic("%s: unknown type\n", __func__);
   9864 		break;
   9865 	}
   9866 }
   9867 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and selecting them requires the PCI ID or the MAC type,
 * without accessing any PHY register.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or the MAC type. The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. This may still be imperfect because some entries are missing
 * from the comparison, but it is more reliable than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   9886 static void
   9887 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9888     uint16_t phy_model)
   9889 {
   9890 	device_t dev = sc->sc_dev;
   9891 	struct mii_data *mii = &sc->sc_mii;
   9892 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9893 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9894 	mii_readreg_t new_readreg;
   9895 	mii_writereg_t new_writereg;
   9896 
   9897 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9898 		device_xname(sc->sc_dev), __func__));
   9899 
   9900 	if (mii->mii_readreg == NULL) {
   9901 		/*
   9902 		 *  This is the first call of this function. For ICH and PCH
   9903 		 * variants, it's difficult to determine the PHY access method
   9904 		 * by sc_type, so use the PCI product ID for some devices.
   9905 		 */
   9906 
   9907 		switch (sc->sc_pcidevid) {
   9908 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9909 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9910 			/* 82577 */
   9911 			new_phytype = WMPHY_82577;
   9912 			break;
   9913 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9914 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9915 			/* 82578 */
   9916 			new_phytype = WMPHY_82578;
   9917 			break;
   9918 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9919 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9920 			/* 82579 */
   9921 			new_phytype = WMPHY_82579;
   9922 			break;
   9923 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9924 		case PCI_PRODUCT_INTEL_82801I_BM:
   9925 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9926 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9927 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9928 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9929 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9930 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9931 			/* ICH8, 9, 10 with 82567 */
   9932 			new_phytype = WMPHY_BM;
   9933 			break;
   9934 		default:
   9935 			break;
   9936 		}
   9937 	} else {
   9938 		/* It's not the first call. Use PHY OUI and model */
   9939 		switch (phy_oui) {
   9940 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9941 			switch (phy_model) {
   9942 			case 0x0004: /* XXX */
   9943 				new_phytype = WMPHY_82578;
   9944 				break;
   9945 			default:
   9946 				break;
   9947 			}
   9948 			break;
   9949 		case MII_OUI_xxMARVELL:
   9950 			switch (phy_model) {
   9951 			case MII_MODEL_xxMARVELL_I210:
   9952 				new_phytype = WMPHY_I210;
   9953 				break;
   9954 			case MII_MODEL_xxMARVELL_E1011:
   9955 			case MII_MODEL_xxMARVELL_E1000_3:
   9956 			case MII_MODEL_xxMARVELL_E1000_5:
   9957 			case MII_MODEL_xxMARVELL_E1112:
   9958 				new_phytype = WMPHY_M88;
   9959 				break;
   9960 			case MII_MODEL_xxMARVELL_E1149:
   9961 				new_phytype = WMPHY_BM;
   9962 				break;
   9963 			case MII_MODEL_xxMARVELL_E1111:
   9964 			case MII_MODEL_xxMARVELL_I347:
   9965 			case MII_MODEL_xxMARVELL_E1512:
   9966 			case MII_MODEL_xxMARVELL_E1340M:
   9967 			case MII_MODEL_xxMARVELL_E1543:
   9968 				new_phytype = WMPHY_M88;
   9969 				break;
   9970 			case MII_MODEL_xxMARVELL_I82563:
   9971 				new_phytype = WMPHY_GG82563;
   9972 				break;
   9973 			default:
   9974 				break;
   9975 			}
   9976 			break;
   9977 		case MII_OUI_INTEL:
   9978 			switch (phy_model) {
   9979 			case MII_MODEL_INTEL_I82577:
   9980 				new_phytype = WMPHY_82577;
   9981 				break;
   9982 			case MII_MODEL_INTEL_I82579:
   9983 				new_phytype = WMPHY_82579;
   9984 				break;
   9985 			case MII_MODEL_INTEL_I217:
   9986 				new_phytype = WMPHY_I217;
   9987 				break;
   9988 			case MII_MODEL_INTEL_I82580:
   9989 			case MII_MODEL_INTEL_I350:
   9990 				new_phytype = WMPHY_82580;
   9991 				break;
   9992 			default:
   9993 				break;
   9994 			}
   9995 			break;
   9996 		case MII_OUI_yyINTEL:
   9997 			switch (phy_model) {
   9998 			case MII_MODEL_yyINTEL_I82562G:
   9999 			case MII_MODEL_yyINTEL_I82562EM:
   10000 			case MII_MODEL_yyINTEL_I82562ET:
   10001 				new_phytype = WMPHY_IFE;
   10002 				break;
   10003 			case MII_MODEL_yyINTEL_IGP01E1000:
   10004 				new_phytype = WMPHY_IGP;
   10005 				break;
   10006 			case MII_MODEL_yyINTEL_I82566:
   10007 				new_phytype = WMPHY_IGP_3;
   10008 				break;
   10009 			default:
   10010 				break;
   10011 			}
   10012 			break;
   10013 		default:
   10014 			break;
   10015 		}
   10016 		if (new_phytype == WMPHY_UNKNOWN)
   10017 			aprint_verbose_dev(dev,
   10018 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10019 			    __func__, phy_oui, phy_model);
   10020 
   10021 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
			    " was incorrect. PHY type from PHY ID = %u\n",
   10025 			    sc->sc_phytype, new_phytype);
   10026 		}
   10027 	}
   10028 
   10029 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10030 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10031 		/* SGMII */
   10032 		new_readreg = wm_sgmii_readreg;
   10033 		new_writereg = wm_sgmii_writereg;
   10034 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10035 		/* BM2 (phyaddr == 1) */
   10036 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10037 		    && (new_phytype != WMPHY_BM)
   10038 		    && (new_phytype != WMPHY_UNKNOWN))
   10039 			doubt_phytype = new_phytype;
   10040 		new_phytype = WMPHY_BM;
   10041 		new_readreg = wm_gmii_bm_readreg;
   10042 		new_writereg = wm_gmii_bm_writereg;
   10043 	} else if (sc->sc_type >= WM_T_PCH) {
   10044 		/* All PCH* use _hv_ */
   10045 		new_readreg = wm_gmii_hv_readreg;
   10046 		new_writereg = wm_gmii_hv_writereg;
   10047 	} else if (sc->sc_type >= WM_T_ICH8) {
   10048 		/* non-82567 ICH8, 9 and 10 */
   10049 		new_readreg = wm_gmii_i82544_readreg;
   10050 		new_writereg = wm_gmii_i82544_writereg;
   10051 	} else if (sc->sc_type >= WM_T_80003) {
   10052 		/* 80003 */
   10053 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10054 		    && (new_phytype != WMPHY_GG82563)
   10055 		    && (new_phytype != WMPHY_UNKNOWN))
   10056 			doubt_phytype = new_phytype;
   10057 		new_phytype = WMPHY_GG82563;
   10058 		new_readreg = wm_gmii_i80003_readreg;
   10059 		new_writereg = wm_gmii_i80003_writereg;
   10060 	} else if (sc->sc_type >= WM_T_I210) {
   10061 		/* I210 and I211 */
   10062 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10063 		    && (new_phytype != WMPHY_I210)
   10064 		    && (new_phytype != WMPHY_UNKNOWN))
   10065 			doubt_phytype = new_phytype;
   10066 		new_phytype = WMPHY_I210;
   10067 		new_readreg = wm_gmii_gs40g_readreg;
   10068 		new_writereg = wm_gmii_gs40g_writereg;
   10069 	} else if (sc->sc_type >= WM_T_82580) {
   10070 		/* 82580, I350 and I354 */
   10071 		new_readreg = wm_gmii_82580_readreg;
   10072 		new_writereg = wm_gmii_82580_writereg;
   10073 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10075 		new_readreg = wm_gmii_i82544_readreg;
   10076 		new_writereg = wm_gmii_i82544_writereg;
   10077 	} else {
   10078 		new_readreg = wm_gmii_i82543_readreg;
   10079 		new_writereg = wm_gmii_i82543_writereg;
   10080 	}
   10081 
   10082 	if (new_phytype == WMPHY_BM) {
   10083 		/* All BM use _bm_ */
   10084 		new_readreg = wm_gmii_bm_readreg;
   10085 		new_writereg = wm_gmii_bm_writereg;
   10086 	}
   10087 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10088 		/* All PCH* use _hv_ */
   10089 		new_readreg = wm_gmii_hv_readreg;
   10090 		new_writereg = wm_gmii_hv_writereg;
   10091 	}
   10092 
   10093 	/* Diag output */
   10094 	if (doubt_phytype != WMPHY_UNKNOWN)
   10095 		aprint_error_dev(dev, "Assumed new PHY type was "
   10096 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10097 		    new_phytype);
   10098 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
		    " was incorrect. New PHY type = %u\n",
   10102 		    sc->sc_phytype, new_phytype);
   10103 
   10104 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10105 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10106 
   10107 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10108 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10109 		    "function was incorrect.\n");
   10110 
   10111 	/* Update now */
   10112 	sc->sc_phytype = new_phytype;
   10113 	mii->mii_readreg = new_readreg;
   10114 	mii->mii_writereg = new_writereg;
   10115 	if (new_readreg == wm_gmii_hv_readreg) {
   10116 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10117 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10118 	} else if (new_readreg == wm_sgmii_readreg) {
   10119 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10120 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10121 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10122 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10123 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10124 	}
   10125 }
   10126 
   10127 /*
   10128  * wm_get_phy_id_82575:
   10129  *
 * Return the PHY ID, or -1 on failure.
   10131  */
   10132 static int
   10133 wm_get_phy_id_82575(struct wm_softc *sc)
   10134 {
   10135 	uint32_t reg;
   10136 	int phyid = -1;
   10137 
   10138 	/* XXX */
   10139 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10140 		return -1;
   10141 
   10142 	if (wm_sgmii_uses_mdio(sc)) {
   10143 		switch (sc->sc_type) {
   10144 		case WM_T_82575:
   10145 		case WM_T_82576:
   10146 			reg = CSR_READ(sc, WMREG_MDIC);
   10147 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10148 			break;
   10149 		case WM_T_82580:
   10150 		case WM_T_I350:
   10151 		case WM_T_I354:
   10152 		case WM_T_I210:
   10153 		case WM_T_I211:
   10154 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10155 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10156 			break;
   10157 		default:
   10158 			return -1;
   10159 		}
   10160 	}
   10161 
   10162 	return phyid;
   10163 }
   10164 
   10165 
   10166 /*
   10167  * wm_gmii_mediainit:
   10168  *
   10169  *	Initialize media for use on 1000BASE-T devices.
   10170  */
   10171 static void
   10172 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10173 {
   10174 	device_t dev = sc->sc_dev;
   10175 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10176 	struct mii_data *mii = &sc->sc_mii;
   10177 	uint32_t reg;
   10178 
   10179 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10180 		device_xname(sc->sc_dev), __func__));
   10181 
   10182 	/* We have GMII. */
   10183 	sc->sc_flags |= WM_F_HAS_MII;
   10184 
   10185 	if (sc->sc_type == WM_T_80003)
   10186 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10187 	else
   10188 		sc->sc_tipg = TIPG_1000T_DFLT;
   10189 
   10190 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10191 	if ((sc->sc_type == WM_T_82580)
   10192 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10193 	    || (sc->sc_type == WM_T_I211)) {
   10194 		reg = CSR_READ(sc, WMREG_PHPM);
   10195 		reg &= ~PHPM_GO_LINK_D;
   10196 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10197 	}
   10198 
   10199 	/*
   10200 	 * Let the chip set speed/duplex on its own based on
   10201 	 * signals from the PHY.
   10202 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10203 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10204 	 */
   10205 	sc->sc_ctrl |= CTRL_SLU;
   10206 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10207 
   10208 	/* Initialize our media structures and probe the GMII. */
   10209 	mii->mii_ifp = ifp;
   10210 
   10211 	mii->mii_statchg = wm_gmii_statchg;
   10212 
   10213 	/* get PHY control from SMBus to PCIe */
   10214 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10215 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10216 	    || (sc->sc_type == WM_T_PCH_CNP))
   10217 		wm_init_phy_workarounds_pchlan(sc);
   10218 
   10219 	wm_gmii_reset(sc);
   10220 
   10221 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10222 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10223 	    wm_gmii_mediastatus);
   10224 
   10225 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10226 	    || (sc->sc_type == WM_T_82580)
   10227 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10228 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10229 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10230 			/* Attach only one port */
   10231 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10232 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10233 		} else {
   10234 			int i, id;
   10235 			uint32_t ctrl_ext;
   10236 
   10237 			id = wm_get_phy_id_82575(sc);
   10238 			if (id != -1) {
   10239 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10240 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10241 			}
   10242 			if ((id == -1)
   10243 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   10245 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10246 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10247 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10248 				CSR_WRITE_FLUSH(sc);
   10249 				delay(300*1000); /* XXX too long */
   10250 
				/* Try PHY addresses 1 through 7 */
   10252 				for (i = 1; i < 8; i++)
   10253 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10254 					    0xffffffff, i, MII_OFFSET_ANY,
   10255 					    MIIF_DOPAUSE);
   10256 
				/* Restore the previous SFP cage power state */
   10258 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10259 			}
   10260 		}
   10261 	} else
   10262 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10263 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10264 
   10265 	/*
   10266 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10267 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10268 	 */
   10269 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10270 		|| (sc->sc_type == WM_T_PCH_SPT)
   10271 		|| (sc->sc_type == WM_T_PCH_CNP))
   10272 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10273 		wm_set_mdio_slow_mode_hv(sc);
   10274 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10275 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10276 	}
   10277 
   10278 	/*
   10279 	 * (For ICH8 variants)
   10280 	 * If PHY detection failed, use BM's r/w function and retry.
   10281 	 */
   10282 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10283 		/* if failed, retry with *_bm_* */
   10284 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10285 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10286 		    sc->sc_phytype);
   10287 		sc->sc_phytype = WMPHY_BM;
   10288 		mii->mii_readreg = wm_gmii_bm_readreg;
   10289 		mii->mii_writereg = wm_gmii_bm_writereg;
   10290 
   10291 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10292 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10293 	}
   10294 
   10295 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   10297 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10298 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10299 		sc->sc_phytype = WMPHY_NONE;
   10300 	} else {
   10301 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10302 
   10303 		/*
		 * PHY found! Check the PHY type again with the second call
		 * of wm_gmii_setup_phytype().
   10306 		 */
   10307 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10308 		    child->mii_mpd_model);
   10309 
   10310 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10311 	}
   10312 }
   10313 
   10314 /*
   10315  * wm_gmii_mediachange:	[ifmedia interface function]
   10316  *
   10317  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10318  */
   10319 static int
   10320 wm_gmii_mediachange(struct ifnet *ifp)
   10321 {
   10322 	struct wm_softc *sc = ifp->if_softc;
   10323 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10324 	int rc;
   10325 
   10326 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10327 		device_xname(sc->sc_dev), __func__));
   10328 	if ((ifp->if_flags & IFF_UP) == 0)
   10329 		return 0;
   10330 
   10331 	/* Disable D0 LPLU. */
   10332 	wm_lplu_d0_disable(sc);
   10333 
   10334 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10335 	sc->sc_ctrl |= CTRL_SLU;
   10336 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10337 	    || (sc->sc_type > WM_T_82543)) {
   10338 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10339 	} else {
   10340 		sc->sc_ctrl &= ~CTRL_ASDE;
   10341 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10342 		if (ife->ifm_media & IFM_FDX)
   10343 			sc->sc_ctrl |= CTRL_FD;
   10344 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10345 		case IFM_10_T:
   10346 			sc->sc_ctrl |= CTRL_SPEED_10;
   10347 			break;
   10348 		case IFM_100_TX:
   10349 			sc->sc_ctrl |= CTRL_SPEED_100;
   10350 			break;
   10351 		case IFM_1000_T:
   10352 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10353 			break;
   10354 		case IFM_NONE:
   10355 			/* There is no specific setting for IFM_NONE */
   10356 			break;
   10357 		default:
   10358 			panic("wm_gmii_mediachange: bad media 0x%x",
   10359 			    ife->ifm_media);
   10360 		}
   10361 	}
   10362 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10363 	CSR_WRITE_FLUSH(sc);
   10364 	if (sc->sc_type <= WM_T_82543)
   10365 		wm_gmii_reset(sc);
   10366 
   10367 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10368 		return 0;
   10369 	return rc;
   10370 }
   10371 
   10372 /*
   10373  * wm_gmii_mediastatus:	[ifmedia interface function]
   10374  *
   10375  *	Get the current interface media status on a 1000BASE-T device.
   10376  */
   10377 static void
   10378 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10379 {
   10380 	struct wm_softc *sc = ifp->if_softc;
   10381 
   10382 	ether_mediastatus(ifp, ifmr);
   10383 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10384 	    | sc->sc_flowflags;
   10385 }
   10386 
   10387 #define	MDI_IO		CTRL_SWDPIN(2)
   10388 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10389 #define	MDI_CLK		CTRL_SWDPIN(3)
   10390 
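/*
 * wm_i82543_mii_sendbits:
 *
 *	Clock out the most significant nbits of data on the bit-banged
 * MDIO interface of the i82543: for each bit, drive MDI_IO and pulse
 * MDI_CLK high and low with 10us hold times.
 */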
   10391 static void
   10392 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10393 {
   10394 	uint32_t i, v;
   10395 
   10396 	v = CSR_READ(sc, WMREG_CTRL);
   10397 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10398 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10399 
   10400 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10401 		if (data & i)
   10402 			v |= MDI_IO;
   10403 		else
   10404 			v &= ~MDI_IO;
   10405 		CSR_WRITE(sc, WMREG_CTRL, v);
   10406 		CSR_WRITE_FLUSH(sc);
   10407 		delay(10);
   10408 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10409 		CSR_WRITE_FLUSH(sc);
   10410 		delay(10);
   10411 		CSR_WRITE(sc, WMREG_CTRL, v);
   10412 		CSR_WRITE_FLUSH(sc);
   10413 		delay(10);
   10414 	}
   10415 }
   10416 
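/*
 * wm_i82543_mii_recvbits:
 *
 *	Clock in a 16-bit value from the bit-banged MDIO interface of
 * the i82543, sampling MDI_IO while MDI_CLK is high, with turnaround
 * clock cycles before and after the data bits.
 */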
   10417 static uint16_t
   10418 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10419 {
   10420 	uint32_t v, i;
   10421 	uint16_t data = 0;
   10422 
   10423 	v = CSR_READ(sc, WMREG_CTRL);
   10424 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10425 	v |= CTRL_SWDPIO(3);
   10426 
   10427 	CSR_WRITE(sc, WMREG_CTRL, v);
   10428 	CSR_WRITE_FLUSH(sc);
   10429 	delay(10);
   10430 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10431 	CSR_WRITE_FLUSH(sc);
   10432 	delay(10);
   10433 	CSR_WRITE(sc, WMREG_CTRL, v);
   10434 	CSR_WRITE_FLUSH(sc);
   10435 	delay(10);
   10436 
   10437 	for (i = 0; i < 16; i++) {
   10438 		data <<= 1;
   10439 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10440 		CSR_WRITE_FLUSH(sc);
   10441 		delay(10);
   10442 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10443 			data |= 1;
   10444 		CSR_WRITE(sc, WMREG_CTRL, v);
   10445 		CSR_WRITE_FLUSH(sc);
   10446 		delay(10);
   10447 	}
   10448 
   10449 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10450 	CSR_WRITE_FLUSH(sc);
   10451 	delay(10);
   10452 	CSR_WRITE(sc, WMREG_CTRL, v);
   10453 	CSR_WRITE_FLUSH(sc);
   10454 	delay(10);
   10455 
   10456 	return data;
   10457 }
   10458 
   10459 #undef MDI_IO
   10460 #undef MDI_DIR
   10461 #undef MDI_CLK
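
/*
 * The i82543 read/write functions below assemble IEEE 802.3 clause 22
 * MDIO frames by hand: a 32-bit preamble of ones, the start and opcode
 * fields, the PHY and register addresses, and (for writes) the
 * turnaround bits followed by the 16 data bits.
 */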
   10462 
   10463 /*
   10464  * wm_gmii_i82543_readreg:	[mii interface function]
   10465  *
   10466  *	Read a PHY register on the GMII (i82543 version).
   10467  */
   10468 static int
   10469 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10470 {
   10471 	struct wm_softc *sc = device_private(dev);
   10472 
   10473 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10474 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10475 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10476 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10477 
   10478 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10479 		device_xname(dev), phy, reg, *val));
   10480 
   10481 	return 0;
   10482 }
   10483 
   10484 /*
   10485  * wm_gmii_i82543_writereg:	[mii interface function]
   10486  *
   10487  *	Write a PHY register on the GMII (i82543 version).
   10488  */
   10489 static int
   10490 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10491 {
   10492 	struct wm_softc *sc = device_private(dev);
   10493 
   10494 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10495 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10496 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10497 	    (MII_COMMAND_START << 30), 32);
   10498 
   10499 	return 0;
   10500 }
   10501 
   10502 /*
   10503  * wm_gmii_mdic_readreg:	[mii interface function]
   10504  *
   10505  *	Read a PHY register on the GMII.
   10506  */
   10507 static int
   10508 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10509 {
   10510 	struct wm_softc *sc = device_private(dev);
   10511 	uint32_t mdic = 0;
   10512 	int i;
   10513 
   10514 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10515 	    && (reg > MII_ADDRMASK)) {
   10516 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10517 		    __func__, sc->sc_phytype, reg);
   10518 		reg &= MII_ADDRMASK;
   10519 	}
   10520 
   10521 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10522 	    MDIC_REGADD(reg));
   10523 
   10524 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10525 		delay(50);
   10526 		mdic = CSR_READ(sc, WMREG_MDIC);
   10527 		if (mdic & MDIC_READY)
   10528 			break;
   10529 	}
   10530 
   10531 	if ((mdic & MDIC_READY) == 0) {
   10532 		DPRINTF(WM_DEBUG_GMII,
   10533 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10534 			device_xname(dev), phy, reg));
   10535 		return ETIMEDOUT;
   10536 	} else if (mdic & MDIC_E) {
   10537 		/* This is normal if no PHY is present. */
   10538 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10539 			device_xname(sc->sc_dev), phy, reg));
   10540 		return -1;
   10541 	} else
   10542 		*val = MDIC_DATA(mdic);
   10543 
   10544 	/*
   10545 	 * Allow some time after each MDIC transaction to avoid
   10546 	 * reading duplicate data in the next MDIC transaction.
   10547 	 */
   10548 	if (sc->sc_type == WM_T_PCH2)
   10549 		delay(100);
   10550 
   10551 	return 0;
   10552 }
   10553 
   10554 /*
   10555  * wm_gmii_mdic_writereg:	[mii interface function]
   10556  *
   10557  *	Write a PHY register on the GMII.
   10558  */
   10559 static int
   10560 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10561 {
   10562 	struct wm_softc *sc = device_private(dev);
   10563 	uint32_t mdic = 0;
   10564 	int i;
   10565 
   10566 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10567 	    && (reg > MII_ADDRMASK)) {
   10568 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10569 		    __func__, sc->sc_phytype, reg);
   10570 		reg &= MII_ADDRMASK;
   10571 	}
   10572 
   10573 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10574 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10575 
   10576 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10577 		delay(50);
   10578 		mdic = CSR_READ(sc, WMREG_MDIC);
   10579 		if (mdic & MDIC_READY)
   10580 			break;
   10581 	}
   10582 
   10583 	if ((mdic & MDIC_READY) == 0) {
   10584 		DPRINTF(WM_DEBUG_GMII,
   10585 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10586 			device_xname(dev), phy, reg));
   10587 		return ETIMEDOUT;
   10588 	} else if (mdic & MDIC_E) {
   10589 		DPRINTF(WM_DEBUG_GMII,
   10590 		    ("%s: MDIC write error: phy %d reg %d\n",
   10591 			device_xname(dev), phy, reg));
   10592 		return -1;
   10593 	}
   10594 
   10595 	/*
   10596 	 * Allow some time after each MDIC transaction to avoid
   10597 	 * reading duplicate data in the next MDIC transaction.
   10598 	 */
   10599 	if (sc->sc_type == WM_T_PCH2)
   10600 		delay(100);
   10601 
   10602 	return 0;
   10603 }
   10604 
   10605 /*
   10606  * wm_gmii_i82544_readreg:	[mii interface function]
   10607  *
   10608  *	Read a PHY register on the GMII.
   10609  */
   10610 static int
   10611 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10612 {
   10613 	struct wm_softc *sc = device_private(dev);
   10614 	int rv;
   10615 
   10616 	if (sc->phy.acquire(sc)) {
   10617 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10618 		return -1;
   10619 	}
   10620 
   10621 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10622 
   10623 	sc->phy.release(sc);
   10624 
   10625 	return rv;
   10626 }
   10627 
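/*
 * wm_gmii_i82544_readreg_locked:
 *
 *	Like wm_gmii_i82544_readreg() but with the PHY semaphore already
 * held. For IGP PHYs, registers above BME1000_MAX_MULTI_PAGE_REG are
 * reached by writing the page to MII_IGPHY_PAGE_SELECT first.
 */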
   10628 static int
   10629 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10630 {
   10631 	struct wm_softc *sc = device_private(dev);
   10632 
   10633 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10634 		switch (sc->sc_phytype) {
   10635 		case WMPHY_IGP:
   10636 		case WMPHY_IGP_2:
   10637 		case WMPHY_IGP_3:
   10638 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10639 			    reg);
   10640 			break;
   10641 		default:
   10642 #ifdef WM_DEBUG
   10643 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10644 			    __func__, sc->sc_phytype, reg);
   10645 #endif
   10646 			break;
   10647 		}
   10648 	}
   10649 
	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10653 }
   10654 
   10655 /*
   10656  * wm_gmii_i82544_writereg:	[mii interface function]
   10657  *
   10658  *	Write a PHY register on the GMII.
   10659  */
   10660 static int
   10661 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10662 {
   10663 	struct wm_softc *sc = device_private(dev);
   10664 	int rv;
   10665 
   10666 	if (sc->phy.acquire(sc)) {
   10667 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10668 		return -1;
   10669 	}
   10670 
	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg, val);
   10672 	sc->phy.release(sc);
   10673 
   10674 	return rv;
   10675 }
   10676 
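/*
 * wm_gmii_i82544_writereg_locked:
 *
 *	Like wm_gmii_i82544_writereg() but with the PHY semaphore already
 * held, performing the same IGP page selection as the read side.
 */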
   10677 static int
   10678 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10679 {
   10680 	struct wm_softc *sc = device_private(dev);
   10681 
   10682 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10683 		switch (sc->sc_phytype) {
   10684 		case WMPHY_IGP:
   10685 		case WMPHY_IGP_2:
   10686 		case WMPHY_IGP_3:
   10687 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10688 			    reg);
   10689 			break;
   10690 		default:
   10691 #ifdef WM_DEBUG
   10692 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10693 			    __func__, sc->sc_phytype, reg);
   10694 #endif
   10695 			break;
   10696 		}
   10697 	}
   10698 
	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10702 }
   10703 
   10704 /*
   10705  * wm_gmii_i80003_readreg:	[mii interface function]
   10706  *
 *	Read a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10710  */
   10711 static int
   10712 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10713 {
   10714 	struct wm_softc *sc = device_private(dev);
   10715 	int page_select;
   10716 	uint16_t temp, temp2;
   10717 	int rv = 0;
   10718 
   10719 	if (phy != 1) /* only one PHY on kumeran bus */
   10720 		return -1;
   10721 
   10722 	if (sc->phy.acquire(sc)) {
   10723 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10724 		return -1;
   10725 	}
   10726 
   10727 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10728 		page_select = GG82563_PHY_PAGE_SELECT;
   10729 	else {
   10730 		/*
   10731 		 * Use Alternative Page Select register to access registers
   10732 		 * 30 and 31.
   10733 		 */
   10734 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10735 	}
   10736 	temp = reg >> GG82563_PAGE_SHIFT;
   10737 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10738 		goto out;
   10739 
   10740 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10741 		/*
		 * Wait another 200us to work around a bug in the ready bit
		 * of the MDIC register.
   10744 		 */
   10745 		delay(200);
   10746 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10747 		if (temp2 != temp) {
   10748 			device_printf(dev, "%s failed\n", __func__);
   10749 			rv = -1;
   10750 			goto out;
   10751 		}
   10752 		delay(200);
   10753 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10754 		delay(200);
   10755 	} else
   10756 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10757 
   10758 out:
   10759 	sc->phy.release(sc);
   10760 	return rv;
   10761 }
   10762 
   10763 /*
   10764  * wm_gmii_i80003_writereg:	[mii interface function]
   10765  *
 *	Write a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10769  */
   10770 static int
   10771 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10772 {
   10773 	struct wm_softc *sc = device_private(dev);
   10774 	int page_select, rv;
   10775 	uint16_t temp, temp2;
   10776 
   10777 	if (phy != 1) /* only one PHY on kumeran bus */
   10778 		return -1;
   10779 
   10780 	if (sc->phy.acquire(sc)) {
   10781 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10782 		return -1;
   10783 	}
   10784 
   10785 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10786 		page_select = GG82563_PHY_PAGE_SELECT;
   10787 	else {
   10788 		/*
   10789 		 * Use Alternative Page Select register to access registers
   10790 		 * 30 and 31.
   10791 		 */
   10792 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10793 	}
   10794 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10795 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10796 		goto out;
   10797 
   10798 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10799 		/*
		 * Wait another 200us to work around a bug in the ready bit
		 * of the MDIC register.
   10802 		 */
   10803 		delay(200);
   10804 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10805 		if (temp2 != temp) {
   10806 			device_printf(dev, "%s failed\n", __func__);
   10807 			rv = -1;
   10808 			goto out;
   10809 		}
   10810 		delay(200);
   10811 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10812 		delay(200);
   10813 	} else
   10814 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10815 
   10816 out:
   10817 	sc->phy.release(sc);
   10818 	return rv;
   10819 }
   10820 
   10821 /*
   10822  * wm_gmii_bm_readreg:	[mii interface function]
   10823  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10827  */
   10828 static int
   10829 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10830 {
   10831 	struct wm_softc *sc = device_private(dev);
   10832 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10833 	int rv;
   10834 
   10835 	if (sc->phy.acquire(sc)) {
   10836 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10837 		return -1;
   10838 	}
   10839 
   10840 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10841 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10842 		    || (reg == 31)) ? 1 : phy;
   10843 	/* Page 800 works differently than the rest so it has its own func */
   10844 	if (page == BM_WUC_PAGE) {
   10845 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10846 		goto release;
   10847 	}
   10848 
   10849 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10850 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10851 		    && (sc->sc_type != WM_T_82583))
   10852 			rv = wm_gmii_mdic_writereg(dev, phy,
   10853 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10854 		else
   10855 			rv = wm_gmii_mdic_writereg(dev, phy,
   10856 			    BME1000_PHY_PAGE_SELECT, page);
   10857 		if (rv != 0)
   10858 			goto release;
   10859 	}
   10860 
   10861 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10862 
   10863 release:
   10864 	sc->phy.release(sc);
   10865 	return rv;
   10866 }
   10867 
   10868 /*
   10869  * wm_gmii_bm_writereg:	[mii interface function]
   10870  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10874  */
   10875 static int
   10876 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10877 {
   10878 	struct wm_softc *sc = device_private(dev);
   10879 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10880 	int rv;
   10881 
   10882 	if (sc->phy.acquire(sc)) {
   10883 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10884 		return -1;
   10885 	}
   10886 
   10887 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10888 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10889 		    || (reg == 31)) ? 1 : phy;
   10890 	/* Page 800 works differently than the rest so it has its own func */
   10891 	if (page == BM_WUC_PAGE) {
   10892 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10893 		goto release;
   10894 	}
   10895 
   10896 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10897 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10898 		    && (sc->sc_type != WM_T_82583))
   10899 			rv = wm_gmii_mdic_writereg(dev, phy,
   10900 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10901 		else
   10902 			rv = wm_gmii_mdic_writereg(dev, phy,
   10903 			    BME1000_PHY_PAGE_SELECT, page);
   10904 		if (rv != 0)
   10905 			goto release;
   10906 	}
   10907 
   10908 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10909 
   10910 release:
   10911 	sc->phy.release(sc);
   10912 	return rv;
   10913 }
   10914 
   10915 /*
   10916  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10917  *  @dev: pointer to the HW structure
   10918  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10919  *
   10920  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10921  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10922  */
   10923 static int
   10924 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10925 {
   10926 	uint16_t temp;
   10927 	int rv;
   10928 
   10929 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10930 		device_xname(dev), __func__));
   10931 
   10932 	if (!phy_regp)
   10933 		return -1;
   10934 
   10935 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10936 
   10937 	/* Select Port Control Registers page */
   10938 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10939 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10940 	if (rv != 0)
   10941 		return rv;
   10942 
   10943 	/* Read WUCE and save it */
   10944 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10945 	if (rv != 0)
   10946 		return rv;
   10947 
   10948 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10949 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10950 	 */
   10951 	temp = *phy_regp;
   10952 	temp |= BM_WUC_ENABLE_BIT;
   10953 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10954 
   10955 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10956 		return rv;
   10957 
	/* Select the Host Wakeup Registers page - the caller is now able
	 * to write registers on the Wakeup registers page.
	 */
   10961 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10962 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10963 }
   10964 
   10965 /*
   10966  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10967  *  @dev: pointer to the HW structure
   10968  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10969  *
   10970  *  Restore BM_WUC_ENABLE_REG to its original value.
   10971  *
   10972  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10973  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10974  *  caller.
   10975  */
   10976 static int
   10977 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10978 {
   10979 
   10980 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10981 		device_xname(dev), __func__));
   10982 
   10983 	if (!phy_regp)
   10984 		return -1;
   10985 
   10986 	/* Select Port Control Registers page */
   10987 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10988 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10989 
   10990 	/* Restore 769.17 to its original value */
   10991 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10992 
   10993 	return 0;
   10994 }
   10995 
   10996 /*
   10997  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   10998  *  @sc: pointer to the HW structure
   10999  *  @offset: register offset to be read or written
   11000  *  @val: pointer to the data to read or write
   11001  *  @rd: determines if operation is read or write
   11002  *  @page_set: BM_WUC_PAGE already set and access enabled
   11003  *
   11004  *  Read the PHY register at offset and store the retrieved information in
   11005  *  data, or write data to PHY register at offset.  Note the procedure to
   11006  *  access the PHY wakeup registers is different than reading the other PHY
   11007  *  registers. It works as such:
   11008  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 for manageability)
   11010  *  3) Write the address using the address opcode (0x11)
   11011  *  4) Read or write the data using the data opcode (0x12)
   11012  *  5) Restore 769.17.2 to its original value
   11013  *
   11014  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11015  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11016  *
   11017  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
 *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calling wm_{en,dis}able_phy_wakeup_reg_access_bm()).
   11020  */
   11021 static int
   11022 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
   11023 	bool page_set)
   11024 {
   11025 	struct wm_softc *sc = device_private(dev);
   11026 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11027 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11028 	uint16_t wuce;
   11029 	int rv = 0;
   11030 
   11031 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11032 		device_xname(dev), __func__));
   11033 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11034 	if ((sc->sc_type == WM_T_PCH)
   11035 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11036 		device_printf(dev,
   11037 		    "Attempting to access page %d while gig enabled.\n", page);
   11038 	}
   11039 
   11040 	if (!page_set) {
   11041 		/* Enable access to PHY wakeup registers */
   11042 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11043 		if (rv != 0) {
   11044 			device_printf(dev,
   11045 			    "%s: Could not enable PHY wakeup reg access\n",
   11046 			    __func__);
   11047 			return rv;
   11048 		}
   11049 	}
   11050 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11051 		device_xname(sc->sc_dev), __func__, page, regnum));
   11052 
	/*
	 * 3) and 4) Access the PHY wakeup register: write the register
	 * number using the address opcode, then read or write the data
	 * using the data opcode (see the steps in the comment above).
	 */
   11057 
   11058 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11059 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11060 	if (rv != 0)
   11061 		return rv;
   11062 
   11063 	if (rd) {
   11064 		/* Read the Wakeup register page value using opcode 0x12 */
   11065 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11066 	} else {
   11067 		/* Write the Wakeup register page value using opcode 0x12 */
   11068 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11069 	}
   11070 	if (rv != 0)
   11071 		return rv;
   11072 
   11073 	if (!page_set)
   11074 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11075 
   11076 	return rv;
   11077 }
   11078 
   11079 /*
   11080  * wm_gmii_hv_readreg:	[mii interface function]
   11081  *
 *	Read a PHY register on the HV (PCH* family) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11085  */
   11086 static int
   11087 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11088 {
   11089 	struct wm_softc *sc = device_private(dev);
   11090 	int rv;
   11091 
   11092 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11093 		device_xname(dev), __func__));
   11094 	if (sc->phy.acquire(sc)) {
   11095 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11096 		return -1;
   11097 	}
   11098 
   11099 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11100 	sc->phy.release(sc);
   11101 	return rv;
   11102 }
   11103 
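/*
 * wm_gmii_hv_readreg_locked:
 *
 *	Like wm_gmii_hv_readreg() but with the PHY semaphore already
 * held. The page and register number are decoded with BM_PHY_REG_PAGE()
 * and BM_PHY_REG_NUM(); page BM_WUC_PAGE (800) is routed to
 * wm_access_phy_wakeup_reg_bm().
 */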
   11104 static int
   11105 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11106 {
   11107 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11108 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11109 	int rv;
   11110 
   11111 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11112 
   11113 	/* Page 800 works differently than the rest so it has its own func */
   11114 	if (page == BM_WUC_PAGE)
   11115 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11116 
   11117 	/*
	 * Pages 1 through 767 work differently than the rest and are
	 * not handled here.
   11120 	 */
   11121 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11122 		printf("gmii_hv_readreg!!!\n");
   11123 		return -1;
   11124 	}
   11125 
   11126 	/*
   11127 	 * XXX I21[789] documents say that the SMBus Address register is at
   11128 	 * PHY address 01, Page 0 (not 768), Register 26.
   11129 	 */
   11130 	if (page == HV_INTC_FC_PAGE_START)
   11131 		page = 0;
   11132 
   11133 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11134 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11135 		    page << BME1000_PAGE_SHIFT);
   11136 		if (rv != 0)
   11137 			return rv;
   11138 	}
   11139 
   11140 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11141 }
   11142 
   11143 /*
   11144  * wm_gmii_hv_writereg:	[mii interface function]
   11145  *
 *	Write a PHY register on the HV (PCH* family) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11149  */
   11150 static int
   11151 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11152 {
   11153 	struct wm_softc *sc = device_private(dev);
   11154 	int rv;
   11155 
   11156 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11157 		device_xname(dev), __func__));
   11158 
   11159 	if (sc->phy.acquire(sc)) {
   11160 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11161 		return -1;
   11162 	}
   11163 
   11164 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11165 	sc->phy.release(sc);
   11166 
   11167 	return rv;
   11168 }
   11169 
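/*
 * wm_gmii_hv_writereg_locked:
 *
 *	Like wm_gmii_hv_writereg() but with the PHY semaphore already
 * held. Handles the same page decoding as the read side, plus the
 * 82578 power-down quirk noted in the function body.
 */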
   11170 static int
   11171 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11172 {
   11173 	struct wm_softc *sc = device_private(dev);
   11174 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11175 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11176 	int rv;
   11177 
   11178 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11179 
   11180 	/* Page 800 works differently than the rest so it has its own func */
   11181 	if (page == BM_WUC_PAGE)
   11182 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11183 		    false);
   11184 
   11185 	/*
    11186 	 * Pages lower than 768 work differently than the rest, so they
    11187 	 * would need their own function.
   11188 	 */
   11189 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11190 		printf("gmii_hv_writereg!!!\n");
   11191 		return -1;
   11192 	}
   11193 
   11194 	{
   11195 		/*
   11196 		 * XXX I21[789] documents say that the SMBus Address register
   11197 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11198 		 */
   11199 		if (page == HV_INTC_FC_PAGE_START)
   11200 			page = 0;
   11201 
   11202 		/*
   11203 		 * XXX Workaround MDIO accesses being disabled after entering
   11204 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11205 		 * register is set)
   11206 		 */
   11207 		if (sc->sc_phytype == WMPHY_82578) {
   11208 			struct mii_softc *child;
   11209 
   11210 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11211 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11212 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11213 			    && ((val & (1 << 11)) != 0)) {
   11214 				printf("XXX need workaround\n");
   11215 			}
   11216 		}
   11217 
   11218 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11219 			rv = wm_gmii_mdic_writereg(dev, 1,
   11220 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11221 			if (rv != 0)
   11222 				return rv;
   11223 		}
   11224 	}
   11225 
   11226 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11227 }
   11228 
   11229 /*
   11230  * wm_gmii_82580_readreg:	[mii interface function]
   11231  *
   11232  *	Read a PHY register on the 82580 and I350.
   11233  * This could be handled by the PHY layer if we didn't have to lock the
    11234  * resource ...
   11235  */
   11236 static int
   11237 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11238 {
   11239 	struct wm_softc *sc = device_private(dev);
   11240 	int rv;
   11241 
   11242 	if (sc->phy.acquire(sc) != 0) {
   11243 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11244 		return -1;
   11245 	}
   11246 
   11247 #ifdef DIAGNOSTIC
   11248 	if (reg > MII_ADDRMASK) {
   11249 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11250 		    __func__, sc->sc_phytype, reg);
   11251 		reg &= MII_ADDRMASK;
   11252 	}
   11253 #endif
   11254 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11255 
   11256 	sc->phy.release(sc);
   11257 	return rv;
   11258 }
   11259 
   11260 /*
   11261  * wm_gmii_82580_writereg:	[mii interface function]
   11262  *
   11263  *	Write a PHY register on the 82580 and I350.
   11264  * This could be handled by the PHY layer if we didn't have to lock the
   11265  * ressource ...
   11266  */
   11267 static int
   11268 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11269 {
   11270 	struct wm_softc *sc = device_private(dev);
   11271 	int rv;
   11272 
   11273 	if (sc->phy.acquire(sc) != 0) {
   11274 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11275 		return -1;
   11276 	}
   11277 
   11278 #ifdef DIAGNOSTIC
   11279 	if (reg > MII_ADDRMASK) {
   11280 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11281 		    __func__, sc->sc_phytype, reg);
   11282 		reg &= MII_ADDRMASK;
   11283 	}
   11284 #endif
   11285 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11286 
   11287 	sc->phy.release(sc);
   11288 	return rv;
   11289 }
   11290 
   11291 /*
   11292  * wm_gmii_gs40g_readreg:	[mii interface function]
   11293  *
    11294  *	Read a PHY register on the I210 and I211.
   11295  * This could be handled by the PHY layer if we didn't have to lock the
    11296  * resource ...
   11297  */
   11298 static int
   11299 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11300 {
   11301 	struct wm_softc *sc = device_private(dev);
   11302 	int page, offset;
   11303 	int rv;
   11304 
   11305 	/* Acquire semaphore */
   11306 	if (sc->phy.acquire(sc)) {
   11307 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11308 		return -1;
   11309 	}
   11310 
   11311 	/* Page select */
   11312 	page = reg >> GS40G_PAGE_SHIFT;
   11313 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11314 	if (rv != 0)
   11315 		goto release;
   11316 
   11317 	/* Read reg */
   11318 	offset = reg & GS40G_OFFSET_MASK;
   11319 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11320 
   11321 release:
   11322 	sc->phy.release(sc);
   11323 	return rv;
   11324 }
   11325 
   11326 /*
   11327  * wm_gmii_gs40g_writereg:	[mii interface function]
   11328  *
   11329  *	Write a PHY register on the I210 and I211.
   11330  * This could be handled by the PHY layer if we didn't have to lock the
    11331  * resource ...
   11332  */
   11333 static int
   11334 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11335 {
   11336 	struct wm_softc *sc = device_private(dev);
   11337 	uint16_t page;
   11338 	int offset, rv;
   11339 
   11340 	/* Acquire semaphore */
   11341 	if (sc->phy.acquire(sc)) {
   11342 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11343 		return -1;
   11344 	}
   11345 
   11346 	/* Page select */
   11347 	page = reg >> GS40G_PAGE_SHIFT;
   11348 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11349 	if (rv != 0)
   11350 		goto release;
   11351 
   11352 	/* Write reg */
   11353 	offset = reg & GS40G_OFFSET_MASK;
   11354 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11355 
   11356 release:
   11357 	/* Release semaphore */
   11358 	sc->phy.release(sc);
   11359 	return rv;
   11360 }
   11361 
   11362 /*
   11363  * wm_gmii_statchg:	[mii interface function]
   11364  *
   11365  *	Callback from MII layer when media changes.
   11366  */
   11367 static void
   11368 wm_gmii_statchg(struct ifnet *ifp)
   11369 {
   11370 	struct wm_softc *sc = ifp->if_softc;
   11371 	struct mii_data *mii = &sc->sc_mii;
   11372 
   11373 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11374 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11375 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11376 
   11377 	/*
   11378 	 * Get flow control negotiation result.
   11379 	 */
   11380 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11381 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11382 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11383 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11384 	}
   11385 
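	/*
	 * Map the negotiated pause flags onto the MAC: TX pause enables
	 * CTRL.TFCE and XON frames (FCRTL.XONE), RX pause enables CTRL.RFCE.
	 */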
   11386 	if (sc->sc_flowflags & IFM_FLOW) {
   11387 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11388 			sc->sc_ctrl |= CTRL_TFCE;
   11389 			sc->sc_fcrtl |= FCRTL_XONE;
   11390 		}
   11391 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11392 			sc->sc_ctrl |= CTRL_RFCE;
   11393 	}
   11394 
   11395 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11396 		DPRINTF(WM_DEBUG_LINK,
   11397 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11398 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11399 	} else {
   11400 		DPRINTF(WM_DEBUG_LINK,
   11401 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11402 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11403 	}
   11404 
   11405 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11406 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11407 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11408 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11409 	if (sc->sc_type == WM_T_80003) {
   11410 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11411 		case IFM_1000_T:
   11412 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11413 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    11414 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11415 			break;
   11416 		default:
   11417 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11418 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    11419 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11420 			break;
   11421 		}
   11422 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11423 	}
   11424 }
   11425 
   11426 /* kumeran related (80003, ICH* and PCH*) */
   11427 
   11428 /*
   11429  * wm_kmrn_readreg:
   11430  *
   11431  *	Read a kumeran register
   11432  */
   11433 static int
   11434 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11435 {
   11436 	int rv;
   11437 
   11438 	if (sc->sc_type == WM_T_80003)
   11439 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11440 	else
   11441 		rv = sc->phy.acquire(sc);
   11442 	if (rv != 0) {
   11443 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11444 		    __func__);
   11445 		return rv;
   11446 	}
   11447 
   11448 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11449 
   11450 	if (sc->sc_type == WM_T_80003)
   11451 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11452 	else
   11453 		sc->phy.release(sc);
   11454 
   11455 	return rv;
   11456 }
   11457 
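/*
 * Kumeran registers are reached indirectly through the single KUMCTRLSTA
 * CSR: the register offset goes into the OFFSET field, KUMCTRLSTA_REN
 * requests a read, and after a short delay the 16-bit result can be read
 * back from the low bits of the same CSR.  A write places the value in
 * the low bits together with the offset, with REN clear.
 */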
   11458 static int
   11459 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11460 {
   11461 
   11462 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11463 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11464 	    KUMCTRLSTA_REN);
   11465 	CSR_WRITE_FLUSH(sc);
   11466 	delay(2);
   11467 
   11468 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11469 
   11470 	return 0;
   11471 }
   11472 
   11473 /*
   11474  * wm_kmrn_writereg:
   11475  *
   11476  *	Write a kumeran register
   11477  */
   11478 static int
   11479 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11480 {
   11481 	int rv;
   11482 
   11483 	if (sc->sc_type == WM_T_80003)
   11484 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11485 	else
   11486 		rv = sc->phy.acquire(sc);
   11487 	if (rv != 0) {
   11488 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11489 		    __func__);
   11490 		return rv;
   11491 	}
   11492 
   11493 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11494 
   11495 	if (sc->sc_type == WM_T_80003)
   11496 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11497 	else
   11498 		sc->phy.release(sc);
   11499 
   11500 	return rv;
   11501 }
   11502 
   11503 static int
   11504 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11505 {
   11506 
   11507 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11508 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11509 
   11510 	return 0;
   11511 }
   11512 
   11513 /*
   11514  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11515  * This access method is different from IEEE MMD.
   11516  */
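/*
 * The EMI block is reached through an address/data register pair on PHY
 * device 2: the EMI register offset is first written to I82579_EMI_ADDR
 * and the data is then read from, or written to, I82579_EMI_DATA.
 */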
   11517 static int
   11518 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11519 {
   11520 	struct wm_softc *sc = device_private(dev);
   11521 	int rv;
   11522 
   11523 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11524 	if (rv != 0)
   11525 		return rv;
   11526 
   11527 	if (rd)
   11528 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11529 	else
   11530 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11531 	return rv;
   11532 }
   11533 
   11534 static int
   11535 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11536 {
   11537 
   11538 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11539 }
   11540 
   11541 static int
   11542 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11543 {
   11544 
   11545 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11546 }
   11547 
   11548 /* SGMII related */
   11549 
   11550 /*
   11551  * wm_sgmii_uses_mdio
   11552  *
   11553  * Check whether the transaction is to the internal PHY or the external
   11554  * MDIO interface. Return true if it's MDIO.
   11555  */
   11556 static bool
   11557 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11558 {
   11559 	uint32_t reg;
   11560 	bool ismdio = false;
   11561 
   11562 	switch (sc->sc_type) {
   11563 	case WM_T_82575:
   11564 	case WM_T_82576:
   11565 		reg = CSR_READ(sc, WMREG_MDIC);
   11566 		ismdio = ((reg & MDIC_DEST) != 0);
   11567 		break;
   11568 	case WM_T_82580:
   11569 	case WM_T_I350:
   11570 	case WM_T_I354:
   11571 	case WM_T_I210:
   11572 	case WM_T_I211:
   11573 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11574 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11575 		break;
   11576 	default:
   11577 		break;
   11578 	}
   11579 
   11580 	return ismdio;
   11581 }
   11582 
   11583 /*
   11584  * wm_sgmii_readreg:	[mii interface function]
   11585  *
   11586  *	Read a PHY register on the SGMII
   11587  * This could be handled by the PHY layer if we didn't have to lock the
    11588  * resource ...
   11589  */
   11590 static int
   11591 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11592 {
   11593 	struct wm_softc *sc = device_private(dev);
   11594 	int rv;
   11595 
   11596 	if (sc->phy.acquire(sc)) {
   11597 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11598 		return -1;
   11599 	}
   11600 
   11601 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11602 
   11603 	sc->phy.release(sc);
   11604 	return rv;
   11605 }
   11606 
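/*
 * SGMII PHY registers are accessed through the I2CCMD CSR: the PHY and
 * register addresses plus an opcode are written to I2CCMD, the READY bit
 * is polled (up to I2CCMD_PHY_TIMEOUT polls, 50us apart) and the ERROR
 * bit is checked.  The 16-bit data is byte-swapped between the CSR and
 * the wire, on reads and writes alike.
 */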
   11607 static int
   11608 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11609 {
   11610 	struct wm_softc *sc = device_private(dev);
   11611 	uint32_t i2ccmd;
    11612 	int i, rv = 0;
   11613 
   11614 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11615 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11616 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11617 
   11618 	/* Poll the ready bit */
   11619 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11620 		delay(50);
   11621 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11622 		if (i2ccmd & I2CCMD_READY)
   11623 			break;
   11624 	}
   11625 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11626 		device_printf(dev, "I2CCMD Read did not complete\n");
   11627 		rv = ETIMEDOUT;
   11628 	}
   11629 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11630 		device_printf(dev, "I2CCMD Error bit set\n");
   11631 		rv = EIO;
   11632 	}
   11633 
   11634 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11635 
   11636 	return rv;
   11637 }
   11638 
   11639 /*
   11640  * wm_sgmii_writereg:	[mii interface function]
   11641  *
   11642  *	Write a PHY register on the SGMII.
   11643  * This could be handled by the PHY layer if we didn't have to lock the
    11644  * resource ...
   11645  */
   11646 static int
   11647 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11648 {
   11649 	struct wm_softc *sc = device_private(dev);
   11650 	int rv;
   11651 
   11652 	if (sc->phy.acquire(sc) != 0) {
   11653 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11654 		return -1;
   11655 	}
   11656 
   11657 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11658 
   11659 	sc->phy.release(sc);
   11660 
   11661 	return rv;
   11662 }
   11663 
   11664 static int
   11665 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11666 {
   11667 	struct wm_softc *sc = device_private(dev);
   11668 	uint32_t i2ccmd;
   11669 	uint16_t swapdata;
   11670 	int rv = 0;
   11671 	int i;
   11672 
   11673 	/* Swap the data bytes for the I2C interface */
   11674 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11675 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11676 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11677 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11678 
   11679 	/* Poll the ready bit */
   11680 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11681 		delay(50);
   11682 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11683 		if (i2ccmd & I2CCMD_READY)
   11684 			break;
   11685 	}
   11686 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11687 		device_printf(dev, "I2CCMD Write did not complete\n");
   11688 		rv = ETIMEDOUT;
   11689 	}
   11690 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11691 		device_printf(dev, "I2CCMD Error bit set\n");
   11692 		rv = EIO;
   11693 	}
   11694 
   11695 	return rv;
   11696 }
   11697 
   11698 /* TBI related */
   11699 
   11700 static bool
   11701 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11702 {
   11703 	bool sig;
   11704 
   11705 	sig = ctrl & CTRL_SWDPIN(1);
   11706 
   11707 	/*
   11708 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11709 	 * detect a signal, 1 if they don't.
   11710 	 */
   11711 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11712 		sig = !sig;
   11713 
   11714 	return sig;
   11715 }
   11716 
   11717 /*
   11718  * wm_tbi_mediainit:
   11719  *
   11720  *	Initialize media for use on 1000BASE-X devices.
   11721  */
   11722 static void
   11723 wm_tbi_mediainit(struct wm_softc *sc)
   11724 {
   11725 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11726 	const char *sep = "";
   11727 
   11728 	if (sc->sc_type < WM_T_82543)
   11729 		sc->sc_tipg = TIPG_WM_DFLT;
   11730 	else
   11731 		sc->sc_tipg = TIPG_LG_DFLT;
   11732 
   11733 	sc->sc_tbi_serdes_anegticks = 5;
   11734 
   11735 	/* Initialize our media structures */
   11736 	sc->sc_mii.mii_ifp = ifp;
   11737 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11738 
   11739 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11740 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11741 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11742 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11743 	else
   11744 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11745 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11746 
   11747 	/*
   11748 	 * SWD Pins:
   11749 	 *
   11750 	 *	0 = Link LED (output)
   11751 	 *	1 = Loss Of Signal (input)
   11752 	 */
   11753 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11754 
   11755 	/* XXX Perhaps this is only for TBI */
   11756 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11757 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11758 
   11759 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11760 		sc->sc_ctrl &= ~CTRL_LRST;
   11761 
   11762 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11763 
   11764 #define	ADD(ss, mm, dd)							\
   11765 do {									\
   11766 	aprint_normal("%s%s", sep, ss);					\
   11767 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11768 	sep = ", ";							\
   11769 } while (/*CONSTCOND*/0)
   11770 
   11771 	aprint_normal_dev(sc->sc_dev, "");
   11772 
   11773 	if (sc->sc_type == WM_T_I354) {
   11774 		uint32_t status;
   11775 
   11776 		status = CSR_READ(sc, WMREG_STATUS);
   11777 		if (((status & STATUS_2P5_SKU) != 0)
   11778 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11779 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11780 		} else
   11781 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11782 	} else if (sc->sc_type == WM_T_82545) {
   11783 		/* Only 82545 is LX (XXX except SFP) */
   11784 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11785 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11786 	} else {
   11787 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11788 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11789 	}
   11790 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11791 	aprint_normal("\n");
   11792 
   11793 #undef ADD
   11794 
   11795 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11796 }
   11797 
   11798 /*
   11799  * wm_tbi_mediachange:	[ifmedia interface function]
   11800  *
   11801  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11802  */
   11803 static int
   11804 wm_tbi_mediachange(struct ifnet *ifp)
   11805 {
   11806 	struct wm_softc *sc = ifp->if_softc;
   11807 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11808 	uint32_t status, ctrl;
   11809 	bool signal;
   11810 	int i;
   11811 
   11812 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11813 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11814 		/* XXX need some work for >= 82571 and < 82575 */
   11815 		if (sc->sc_type < WM_T_82575)
   11816 			return 0;
   11817 	}
   11818 
   11819 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11820 	    || (sc->sc_type >= WM_T_82575))
   11821 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11822 
   11823 	sc->sc_ctrl &= ~CTRL_LRST;
   11824 	sc->sc_txcw = TXCW_ANE;
   11825 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11826 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11827 	else if (ife->ifm_media & IFM_FDX)
   11828 		sc->sc_txcw |= TXCW_FD;
   11829 	else
   11830 		sc->sc_txcw |= TXCW_HD;
   11831 
   11832 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11833 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11834 
    11835 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11836 		device_xname(sc->sc_dev), sc->sc_txcw));
   11837 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11838 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11839 	CSR_WRITE_FLUSH(sc);
   11840 	delay(1000);
   11841 
    11842 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11843 	signal = wm_tbi_havesignal(sc, ctrl);
   11844 
   11845 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11846 		signal));
   11847 
   11848 	if (signal) {
   11849 		/* Have signal; wait for the link to come up. */
   11850 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11851 			delay(10000);
   11852 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11853 				break;
   11854 		}
   11855 
    11856 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
    11857 			device_xname(sc->sc_dev), i));
   11858 
   11859 		status = CSR_READ(sc, WMREG_STATUS);
   11860 		DPRINTF(WM_DEBUG_LINK,
   11861 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
    11862 			device_xname(sc->sc_dev), status, STATUS_LU));
   11863 		if (status & STATUS_LU) {
   11864 			/* Link is up. */
   11865 			DPRINTF(WM_DEBUG_LINK,
   11866 			    ("%s: LINK: set media -> link up %s\n",
   11867 				device_xname(sc->sc_dev),
   11868 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11869 
   11870 			/*
   11871 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11872 			 * so we should update sc->sc_ctrl
   11873 			 */
   11874 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11875 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11876 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11877 			if (status & STATUS_FD)
   11878 				sc->sc_tctl |=
   11879 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11880 			else
   11881 				sc->sc_tctl |=
   11882 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11883 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11884 				sc->sc_fcrtl |= FCRTL_XONE;
   11885 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11886 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11887 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11888 			sc->sc_tbi_linkup = 1;
   11889 		} else {
   11890 			if (i == WM_LINKUP_TIMEOUT)
   11891 				wm_check_for_link(sc);
   11892 			/* Link is down. */
   11893 			DPRINTF(WM_DEBUG_LINK,
   11894 			    ("%s: LINK: set media -> link down\n",
   11895 				device_xname(sc->sc_dev)));
   11896 			sc->sc_tbi_linkup = 0;
   11897 		}
   11898 	} else {
   11899 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11900 			device_xname(sc->sc_dev)));
   11901 		sc->sc_tbi_linkup = 0;
   11902 	}
   11903 
   11904 	wm_tbi_serdes_set_linkled(sc);
   11905 
   11906 	return 0;
   11907 }
   11908 
   11909 /*
   11910  * wm_tbi_mediastatus:	[ifmedia interface function]
   11911  *
   11912  *	Get the current interface media status on a 1000BASE-X device.
   11913  */
   11914 static void
   11915 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11916 {
   11917 	struct wm_softc *sc = ifp->if_softc;
   11918 	uint32_t ctrl, status;
   11919 
   11920 	ifmr->ifm_status = IFM_AVALID;
   11921 	ifmr->ifm_active = IFM_ETHER;
   11922 
   11923 	status = CSR_READ(sc, WMREG_STATUS);
   11924 	if ((status & STATUS_LU) == 0) {
   11925 		ifmr->ifm_active |= IFM_NONE;
   11926 		return;
   11927 	}
   11928 
   11929 	ifmr->ifm_status |= IFM_ACTIVE;
   11930 	/* Only 82545 is LX */
   11931 	if (sc->sc_type == WM_T_82545)
   11932 		ifmr->ifm_active |= IFM_1000_LX;
   11933 	else
   11934 		ifmr->ifm_active |= IFM_1000_SX;
   11935 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11936 		ifmr->ifm_active |= IFM_FDX;
   11937 	else
   11938 		ifmr->ifm_active |= IFM_HDX;
   11939 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11940 	if (ctrl & CTRL_RFCE)
   11941 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11942 	if (ctrl & CTRL_TFCE)
   11943 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11944 }
   11945 
   11946 /* XXX TBI only */
   11947 static int
   11948 wm_check_for_link(struct wm_softc *sc)
   11949 {
   11950 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11951 	uint32_t rxcw;
   11952 	uint32_t ctrl;
   11953 	uint32_t status;
   11954 	bool signal;
   11955 
   11956 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11957 		device_xname(sc->sc_dev), __func__));
   11958 
   11959 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11960 		/* XXX need some work for >= 82571 */
   11961 		if (sc->sc_type >= WM_T_82571) {
   11962 			sc->sc_tbi_linkup = 1;
   11963 			return 0;
   11964 		}
   11965 	}
   11966 
   11967 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11968 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11969 	status = CSR_READ(sc, WMREG_STATUS);
   11970 	signal = wm_tbi_havesignal(sc, ctrl);
   11971 
   11972 	DPRINTF(WM_DEBUG_LINK,
   11973 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11974 		device_xname(sc->sc_dev), __func__, signal,
   11975 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11976 
   11977 	/*
   11978 	 * SWDPIN   LU RXCW
   11979 	 *	0    0	  0
   11980 	 *	0    0	  1	(should not happen)
   11981 	 *	0    1	  0	(should not happen)
   11982 	 *	0    1	  1	(should not happen)
   11983 	 *	1    0	  0	Disable autonego and force linkup
   11984 	 *	1    0	  1	got /C/ but not linkup yet
   11985 	 *	1    1	  0	(linkup)
   11986 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11987 	 *
   11988 	 */
   11989 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11990 		DPRINTF(WM_DEBUG_LINK,
   11991 		    ("%s: %s: force linkup and fullduplex\n",
   11992 			device_xname(sc->sc_dev), __func__));
   11993 		sc->sc_tbi_linkup = 0;
   11994 		/* Disable auto-negotiation in the TXCW register */
   11995 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11996 
   11997 		/*
   11998 		 * Force link-up and also force full-duplex.
   11999 		 *
    12000 		 * NOTE: CTRL will update TFCE and RFCE automatically,
   12001 		 * so we should update sc->sc_ctrl
   12002 		 */
   12003 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12004 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12005 	} else if (((status & STATUS_LU) != 0)
   12006 	    && ((rxcw & RXCW_C) != 0)
   12007 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12008 		sc->sc_tbi_linkup = 1;
   12009 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12010 			device_xname(sc->sc_dev),
   12011 			__func__));
   12012 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12013 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12014 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12015 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12016 			device_xname(sc->sc_dev), __func__));
   12017 	} else {
   12018 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12019 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12020 			status));
   12021 	}
   12022 
   12023 	return 0;
   12024 }
   12025 
   12026 /*
   12027  * wm_tbi_tick:
   12028  *
   12029  *	Check the link on TBI devices.
   12030  *	This function acts as mii_tick().
   12031  */
   12032 static void
   12033 wm_tbi_tick(struct wm_softc *sc)
   12034 {
   12035 	struct mii_data *mii = &sc->sc_mii;
   12036 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12037 	uint32_t status;
   12038 
   12039 	KASSERT(WM_CORE_LOCKED(sc));
   12040 
   12041 	status = CSR_READ(sc, WMREG_STATUS);
   12042 
   12043 	/* XXX is this needed? */
   12044 	(void)CSR_READ(sc, WMREG_RXCW);
   12045 	(void)CSR_READ(sc, WMREG_CTRL);
   12046 
   12047 	/* set link status */
   12048 	if ((status & STATUS_LU) == 0) {
   12049 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12050 			device_xname(sc->sc_dev)));
   12051 		sc->sc_tbi_linkup = 0;
   12052 	} else if (sc->sc_tbi_linkup == 0) {
   12053 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12054 			device_xname(sc->sc_dev),
   12055 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12056 		sc->sc_tbi_linkup = 1;
   12057 		sc->sc_tbi_serdes_ticks = 0;
   12058 	}
   12059 
   12060 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12061 		goto setled;
   12062 
   12063 	if ((status & STATUS_LU) == 0) {
   12064 		sc->sc_tbi_linkup = 0;
   12065 		/* If the timer expired, retry autonegotiation */
   12066 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12067 		    && (++sc->sc_tbi_serdes_ticks
   12068 			>= sc->sc_tbi_serdes_anegticks)) {
   12069 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12070 			sc->sc_tbi_serdes_ticks = 0;
   12071 			/*
   12072 			 * Reset the link, and let autonegotiation do
   12073 			 * its thing
   12074 			 */
   12075 			sc->sc_ctrl |= CTRL_LRST;
   12076 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12077 			CSR_WRITE_FLUSH(sc);
   12078 			delay(1000);
   12079 			sc->sc_ctrl &= ~CTRL_LRST;
   12080 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12081 			CSR_WRITE_FLUSH(sc);
   12082 			delay(1000);
   12083 			CSR_WRITE(sc, WMREG_TXCW,
   12084 			    sc->sc_txcw & ~TXCW_ANE);
   12085 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12086 		}
   12087 	}
   12088 
   12089 setled:
   12090 	wm_tbi_serdes_set_linkled(sc);
   12091 }
   12092 
   12093 /* SERDES related */
   12094 static void
   12095 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12096 {
   12097 	uint32_t reg;
   12098 
   12099 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12100 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12101 		return;
   12102 
   12103 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12104 	reg |= PCS_CFG_PCS_EN;
   12105 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12106 
   12107 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12108 	reg &= ~CTRL_EXT_SWDPIN(3);
   12109 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12110 	CSR_WRITE_FLUSH(sc);
   12111 }
   12112 
   12113 static int
   12114 wm_serdes_mediachange(struct ifnet *ifp)
   12115 {
   12116 	struct wm_softc *sc = ifp->if_softc;
   12117 	bool pcs_autoneg = true; /* XXX */
   12118 	uint32_t ctrl_ext, pcs_lctl, reg;
   12119 
   12120 	/* XXX Currently, this function is not called on 8257[12] */
   12121 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12122 	    || (sc->sc_type >= WM_T_82575))
   12123 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12124 
   12125 	wm_serdes_power_up_link_82575(sc);
   12126 
   12127 	sc->sc_ctrl |= CTRL_SLU;
   12128 
   12129 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12130 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12131 
   12132 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12133 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12134 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12135 	case CTRL_EXT_LINK_MODE_SGMII:
   12136 		pcs_autoneg = true;
   12137 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12138 		break;
   12139 	case CTRL_EXT_LINK_MODE_1000KX:
   12140 		pcs_autoneg = false;
   12141 		/* FALLTHROUGH */
   12142 	default:
   12143 		if ((sc->sc_type == WM_T_82575)
   12144 		    || (sc->sc_type == WM_T_82576)) {
   12145 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12146 				pcs_autoneg = false;
   12147 		}
   12148 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12149 		    | CTRL_FRCFDX;
   12150 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12151 	}
   12152 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12153 
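	/*
	 * With PCS autonegotiation, the link is brought up by setting
	 * AN_ENABLE and AN_RESTART and advertising both symmetric and
	 * asymmetric pause in PCS_ANADV; otherwise speed, duplex and flow
	 * control are forced (PCS_LCTL_FSD | PCS_LCTL_FORCE_FC).
	 */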
   12154 	if (pcs_autoneg) {
   12155 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12156 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12157 
   12158 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12159 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12160 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12161 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12162 	} else
   12163 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12164 
   12165 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12166 
   12168 	return 0;
   12169 }
   12170 
   12171 static void
   12172 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12173 {
   12174 	struct wm_softc *sc = ifp->if_softc;
   12175 	struct mii_data *mii = &sc->sc_mii;
   12176 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12177 	uint32_t pcs_adv, pcs_lpab, reg;
   12178 
   12179 	ifmr->ifm_status = IFM_AVALID;
   12180 	ifmr->ifm_active = IFM_ETHER;
   12181 
   12182 	/* Check PCS */
   12183 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12184 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12185 		ifmr->ifm_active |= IFM_NONE;
   12186 		sc->sc_tbi_linkup = 0;
   12187 		goto setled;
   12188 	}
   12189 
   12190 	sc->sc_tbi_linkup = 1;
   12191 	ifmr->ifm_status |= IFM_ACTIVE;
   12192 	if (sc->sc_type == WM_T_I354) {
   12193 		uint32_t status;
   12194 
   12195 		status = CSR_READ(sc, WMREG_STATUS);
   12196 		if (((status & STATUS_2P5_SKU) != 0)
   12197 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12198 			ifmr->ifm_active |= IFM_2500_KX;
   12199 		} else
   12200 			ifmr->ifm_active |= IFM_1000_KX;
   12201 	} else {
   12202 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12203 		case PCS_LSTS_SPEED_10:
   12204 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12205 			break;
   12206 		case PCS_LSTS_SPEED_100:
   12207 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12208 			break;
   12209 		case PCS_LSTS_SPEED_1000:
   12210 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12211 			break;
   12212 		default:
   12213 			device_printf(sc->sc_dev, "Unknown speed\n");
   12214 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12215 			break;
   12216 		}
   12217 	}
   12218 	if ((reg & PCS_LSTS_FDX) != 0)
   12219 		ifmr->ifm_active |= IFM_FDX;
   12220 	else
   12221 		ifmr->ifm_active |= IFM_HDX;
   12222 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12223 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12224 		/* Check flow */
   12225 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12226 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12227 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12228 			goto setled;
   12229 		}
   12230 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12231 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12232 		DPRINTF(WM_DEBUG_LINK,
   12233 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12234 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12235 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12236 			mii->mii_media_active |= IFM_FLOW
   12237 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12238 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12239 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12240 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12241 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12242 			mii->mii_media_active |= IFM_FLOW
   12243 			    | IFM_ETH_TXPAUSE;
   12244 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12245 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12246 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12247 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12248 			mii->mii_media_active |= IFM_FLOW
   12249 			    | IFM_ETH_RXPAUSE;
   12250 		}
   12251 	}
   12252 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12253 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12254 setled:
   12255 	wm_tbi_serdes_set_linkled(sc);
   12256 }
   12257 
   12258 /*
   12259  * wm_serdes_tick:
   12260  *
   12261  *	Check the link on serdes devices.
   12262  */
   12263 static void
   12264 wm_serdes_tick(struct wm_softc *sc)
   12265 {
   12266 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12267 	struct mii_data *mii = &sc->sc_mii;
   12268 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12269 	uint32_t reg;
   12270 
   12271 	KASSERT(WM_CORE_LOCKED(sc));
   12272 
   12273 	mii->mii_media_status = IFM_AVALID;
   12274 	mii->mii_media_active = IFM_ETHER;
   12275 
   12276 	/* Check PCS */
   12277 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12278 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12279 		mii->mii_media_status |= IFM_ACTIVE;
   12280 		sc->sc_tbi_linkup = 1;
   12281 		sc->sc_tbi_serdes_ticks = 0;
   12282 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12283 		if ((reg & PCS_LSTS_FDX) != 0)
   12284 			mii->mii_media_active |= IFM_FDX;
   12285 		else
   12286 			mii->mii_media_active |= IFM_HDX;
   12287 	} else {
    12288 		mii->mii_media_active |= IFM_NONE;
   12289 		sc->sc_tbi_linkup = 0;
   12290 		/* If the timer expired, retry autonegotiation */
   12291 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12292 		    && (++sc->sc_tbi_serdes_ticks
   12293 			>= sc->sc_tbi_serdes_anegticks)) {
   12294 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12295 			sc->sc_tbi_serdes_ticks = 0;
   12296 			/* XXX */
   12297 			wm_serdes_mediachange(ifp);
   12298 		}
   12299 	}
   12300 
   12301 	wm_tbi_serdes_set_linkled(sc);
   12302 }
   12303 
   12304 /* SFP related */
   12305 
   12306 static int
   12307 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12308 {
   12309 	uint32_t i2ccmd;
   12310 	int i;
   12311 
   12312 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12313 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12314 
   12315 	/* Poll the ready bit */
   12316 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12317 		delay(50);
   12318 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12319 		if (i2ccmd & I2CCMD_READY)
   12320 			break;
   12321 	}
   12322 	if ((i2ccmd & I2CCMD_READY) == 0)
   12323 		return -1;
   12324 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12325 		return -1;
   12326 
   12327 	*data = i2ccmd & 0x00ff;
   12328 
   12329 	return 0;
   12330 }
   12331 
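/*
 * Identify the module from its SFF EEPROM: the ID byte distinguishes a
 * soldered-down SFF module from a pluggable SFP, and the Ethernet
 * compliance flags byte selects the media type (1000BASE-SX/LX ->
 * SERDES, 1000BASE-T -> copper, 100BASE-FX -> SERDES, the latter two
 * with WM_F_SGMII set).
 */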
   12332 static uint32_t
   12333 wm_sfp_get_media_type(struct wm_softc *sc)
   12334 {
   12335 	uint32_t ctrl_ext;
   12336 	uint8_t val = 0;
   12337 	int timeout = 3;
   12338 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12339 	int rv = -1;
   12340 
   12341 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12342 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12343 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12344 	CSR_WRITE_FLUSH(sc);
   12345 
   12346 	/* Read SFP module data */
   12347 	while (timeout) {
   12348 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12349 		if (rv == 0)
   12350 			break;
   12351 		delay(100*1000); /* XXX too big */
   12352 		timeout--;
   12353 	}
   12354 	if (rv != 0)
   12355 		goto out;
   12356 	switch (val) {
   12357 	case SFF_SFP_ID_SFF:
   12358 		aprint_normal_dev(sc->sc_dev,
   12359 		    "Module/Connector soldered to board\n");
   12360 		break;
   12361 	case SFF_SFP_ID_SFP:
   12362 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12363 		break;
   12364 	case SFF_SFP_ID_UNKNOWN:
   12365 		goto out;
   12366 	default:
   12367 		break;
   12368 	}
   12369 
   12370 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
    12371 	if (rv != 0)
    12372 		goto out;
   12374 
   12375 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12376 		mediatype = WM_MEDIATYPE_SERDES;
   12377 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12378 		sc->sc_flags |= WM_F_SGMII;
   12379 		mediatype = WM_MEDIATYPE_COPPER;
   12380 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12381 		sc->sc_flags |= WM_F_SGMII;
   12382 		mediatype = WM_MEDIATYPE_SERDES;
   12383 	}
   12384 
   12385 out:
   12386 	/* Restore I2C interface setting */
   12387 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12388 
   12389 	return mediatype;
   12390 }
   12391 
   12392 /*
   12393  * NVM related.
   12394  * Microwire, SPI (w/wo EERD) and Flash.
   12395  */
   12396 
   12397 /* Both spi and uwire */
   12398 
   12399 /*
   12400  * wm_eeprom_sendbits:
   12401  *
   12402  *	Send a series of bits to the EEPROM.
   12403  */
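/*
 * Bits are clocked out MSB first by driving EECD_DI and pulsing the
 * EECD_SK clock, with the data held for ~2us on either side of each
 * clock pulse.
 */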
   12404 static void
   12405 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12406 {
   12407 	uint32_t reg;
   12408 	int x;
   12409 
   12410 	reg = CSR_READ(sc, WMREG_EECD);
   12411 
   12412 	for (x = nbits; x > 0; x--) {
   12413 		if (bits & (1U << (x - 1)))
   12414 			reg |= EECD_DI;
   12415 		else
   12416 			reg &= ~EECD_DI;
   12417 		CSR_WRITE(sc, WMREG_EECD, reg);
   12418 		CSR_WRITE_FLUSH(sc);
   12419 		delay(2);
   12420 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12421 		CSR_WRITE_FLUSH(sc);
   12422 		delay(2);
   12423 		CSR_WRITE(sc, WMREG_EECD, reg);
   12424 		CSR_WRITE_FLUSH(sc);
   12425 		delay(2);
   12426 	}
   12427 }
   12428 
   12429 /*
   12430  * wm_eeprom_recvbits:
   12431  *
   12432  *	Receive a series of bits from the EEPROM.
   12433  */
   12434 static void
   12435 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12436 {
   12437 	uint32_t reg, val;
   12438 	int x;
   12439 
   12440 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12441 
   12442 	val = 0;
   12443 	for (x = nbits; x > 0; x--) {
   12444 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12445 		CSR_WRITE_FLUSH(sc);
   12446 		delay(2);
   12447 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12448 			val |= (1U << (x - 1));
   12449 		CSR_WRITE(sc, WMREG_EECD, reg);
   12450 		CSR_WRITE_FLUSH(sc);
   12451 		delay(2);
   12452 	}
   12453 	*valp = val;
   12454 }
   12455 
   12456 /* Microwire */
   12457 
   12458 /*
   12459  * wm_nvm_read_uwire:
   12460  *
   12461  *	Read a word from the EEPROM using the MicroWire protocol.
   12462  */
   12463 static int
   12464 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12465 {
   12466 	uint32_t reg, val;
   12467 	int i;
   12468 
   12469 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12470 		device_xname(sc->sc_dev), __func__));
   12471 
   12472 	if (sc->nvm.acquire(sc) != 0)
   12473 		return -1;
   12474 
   12475 	for (i = 0; i < wordcnt; i++) {
   12476 		/* Clear SK and DI. */
   12477 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12478 		CSR_WRITE(sc, WMREG_EECD, reg);
   12479 
   12480 		/*
   12481 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12482 		 * and Xen.
   12483 		 *
   12484 		 * We use this workaround only for 82540 because qemu's
    12485 		 * e1000 acts as an 82540.
   12486 		 */
   12487 		if (sc->sc_type == WM_T_82540) {
   12488 			reg |= EECD_SK;
   12489 			CSR_WRITE(sc, WMREG_EECD, reg);
   12490 			reg &= ~EECD_SK;
   12491 			CSR_WRITE(sc, WMREG_EECD, reg);
   12492 			CSR_WRITE_FLUSH(sc);
   12493 			delay(2);
   12494 		}
   12495 		/* XXX: end of workaround */
   12496 
   12497 		/* Set CHIP SELECT. */
   12498 		reg |= EECD_CS;
   12499 		CSR_WRITE(sc, WMREG_EECD, reg);
   12500 		CSR_WRITE_FLUSH(sc);
   12501 		delay(2);
   12502 
   12503 		/* Shift in the READ command. */
   12504 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12505 
   12506 		/* Shift in address. */
   12507 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12508 
   12509 		/* Shift out the data. */
   12510 		wm_eeprom_recvbits(sc, &val, 16);
   12511 		data[i] = val & 0xffff;
   12512 
   12513 		/* Clear CHIP SELECT. */
   12514 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12515 		CSR_WRITE(sc, WMREG_EECD, reg);
   12516 		CSR_WRITE_FLUSH(sc);
   12517 		delay(2);
   12518 	}
   12519 
   12520 	sc->nvm.release(sc);
   12521 	return 0;
   12522 }
   12523 
   12524 /* SPI */
   12525 
   12526 /*
   12527  * Set SPI and FLASH related information from the EECD register.
   12528  * For 82541 and 82547, the word size is taken from EEPROM.
   12529  */
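/*
 * The size field is an exponent: after the per-family adjustment below,
 * sc_nvm_wordsize = 1 << size.  For 82541/82547 the exponent is taken
 * from the NVM_OFF_EEPROM_SIZE word of the EEPROM itself, which is read
 * using a provisional 64-word size.
 */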
   12530 static int
   12531 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12532 {
   12533 	int size;
   12534 	uint32_t reg;
   12535 	uint16_t data;
   12536 
   12537 	reg = CSR_READ(sc, WMREG_EECD);
   12538 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12539 
   12540 	/* Read the size of NVM from EECD by default */
   12541 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12542 	switch (sc->sc_type) {
   12543 	case WM_T_82541:
   12544 	case WM_T_82541_2:
   12545 	case WM_T_82547:
   12546 	case WM_T_82547_2:
   12547 		/* Set dummy value to access EEPROM */
   12548 		sc->sc_nvm_wordsize = 64;
   12549 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12550 			aprint_error_dev(sc->sc_dev,
   12551 			    "%s: failed to read EEPROM size\n", __func__);
   12552 		}
   12553 		reg = data;
   12554 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12555 		if (size == 0)
   12556 			size = 6; /* 64 word size */
   12557 		else
   12558 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12559 		break;
   12560 	case WM_T_80003:
   12561 	case WM_T_82571:
   12562 	case WM_T_82572:
   12563 	case WM_T_82573: /* SPI case */
   12564 	case WM_T_82574: /* SPI case */
   12565 	case WM_T_82583: /* SPI case */
   12566 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12567 		if (size > 14)
   12568 			size = 14;
   12569 		break;
   12570 	case WM_T_82575:
   12571 	case WM_T_82576:
   12572 	case WM_T_82580:
   12573 	case WM_T_I350:
   12574 	case WM_T_I354:
   12575 	case WM_T_I210:
   12576 	case WM_T_I211:
   12577 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12578 		if (size > 15)
   12579 			size = 15;
   12580 		break;
   12581 	default:
   12582 		aprint_error_dev(sc->sc_dev,
   12583 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12584 		return -1;
   12586 	}
   12587 
   12588 	sc->sc_nvm_wordsize = 1 << size;
   12589 
   12590 	return 0;
   12591 }
   12592 
   12593 /*
   12594  * wm_nvm_ready_spi:
   12595  *
   12596  *	Wait for a SPI EEPROM to be ready for commands.
   12597  */
   12598 static int
   12599 wm_nvm_ready_spi(struct wm_softc *sc)
   12600 {
   12601 	uint32_t val;
   12602 	int usec;
   12603 
   12604 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12605 		device_xname(sc->sc_dev), __func__));
   12606 
   12607 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12608 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12609 		wm_eeprom_recvbits(sc, &val, 8);
   12610 		if ((val & SPI_SR_RDY) == 0)
   12611 			break;
   12612 	}
   12613 	if (usec >= SPI_MAX_RETRIES) {
   12614 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12615 		return -1;
   12616 	}
   12617 	return 0;
   12618 }
   12619 
   12620 /*
   12621  * wm_nvm_read_spi:
   12622  *
    12623  *	Read a word from the EEPROM using the SPI protocol.
   12624  */
   12625 static int
   12626 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12627 {
   12628 	uint32_t reg, val;
   12629 	int i;
   12630 	uint8_t opc;
   12631 	int rv = 0;
   12632 
   12633 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12634 		device_xname(sc->sc_dev), __func__));
   12635 
   12636 	if (sc->nvm.acquire(sc) != 0)
   12637 		return -1;
   12638 
   12639 	/* Clear SK and CS. */
   12640 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12641 	CSR_WRITE(sc, WMREG_EECD, reg);
   12642 	CSR_WRITE_FLUSH(sc);
   12643 	delay(2);
   12644 
   12645 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12646 		goto out;
   12647 
   12648 	/* Toggle CS to flush commands. */
   12649 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12650 	CSR_WRITE_FLUSH(sc);
   12651 	delay(2);
   12652 	CSR_WRITE(sc, WMREG_EECD, reg);
   12653 	CSR_WRITE_FLUSH(sc);
   12654 	delay(2);
   12655 
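	/*
	 * Parts with 8-bit addressing carry the high address bit in the
	 * opcode (SPI_OPC_A8); the word address itself is sent shifted left
	 * by one to form a byte address.
	 */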
   12656 	opc = SPI_OPC_READ;
   12657 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12658 		opc |= SPI_OPC_A8;
   12659 
   12660 	wm_eeprom_sendbits(sc, opc, 8);
   12661 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12662 
   12663 	for (i = 0; i < wordcnt; i++) {
   12664 		wm_eeprom_recvbits(sc, &val, 16);
   12665 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12666 	}
   12667 
   12668 	/* Raise CS and clear SK. */
   12669 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12670 	CSR_WRITE(sc, WMREG_EECD, reg);
   12671 	CSR_WRITE_FLUSH(sc);
   12672 	delay(2);
   12673 
   12674 out:
   12675 	sc->nvm.release(sc);
   12676 	return rv;
   12677 }
   12678 
   12679 /* Using with EERD */
   12680 
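/*
 * The EERD register provides a simpler, self-timed NVM read: software
 * writes the word address together with EERD_START, polls EERD_DONE (up
 * to 100000 polls, 5us apart) and then picks the result out of the data
 * field of the same register.
 */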
   12681 static int
   12682 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12683 {
   12684 	uint32_t attempts = 100000;
   12685 	uint32_t i, reg = 0;
   12686 	int32_t done = -1;
   12687 
   12688 	for (i = 0; i < attempts; i++) {
   12689 		reg = CSR_READ(sc, rw);
   12690 
   12691 		if (reg & EERD_DONE) {
   12692 			done = 0;
   12693 			break;
   12694 		}
   12695 		delay(5);
   12696 	}
   12697 
   12698 	return done;
   12699 }
   12700 
   12701 static int
   12702 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12703 {
   12704 	int i, eerd = 0;
   12705 	int rv = 0;
   12706 
   12707 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12708 		device_xname(sc->sc_dev), __func__));
   12709 
   12710 	if (sc->nvm.acquire(sc) != 0)
   12711 		return -1;
   12712 
   12713 	for (i = 0; i < wordcnt; i++) {
   12714 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12715 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12716 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12717 		if (rv != 0) {
   12718 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12719 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12720 			break;
   12721 		}
   12722 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12723 	}
   12724 
   12725 	sc->nvm.release(sc);
   12726 	return rv;
   12727 }
   12728 
   12729 /* Flash */
   12730 
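/*
 * ICH/PCH flash parts hold two copies (banks) of the NVM image.  The
 * valid bank is found by checking the signature bits in the upper byte
 * of each bank's ICH_NVM_SIG_WORD; on PCH_SPT/PCH_CNP the flash can only
 * be read 32 bits at a time, so the signature byte is extracted from a
 * dword read instead of a byte read.
 */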
   12731 static int
   12732 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12733 {
   12734 	uint32_t eecd;
   12735 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12736 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12737 	uint32_t nvm_dword = 0;
   12738 	uint8_t sig_byte = 0;
   12739 	int rv;
   12740 
   12741 	switch (sc->sc_type) {
   12742 	case WM_T_PCH_SPT:
   12743 	case WM_T_PCH_CNP:
   12744 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12745 		act_offset = ICH_NVM_SIG_WORD * 2;
   12746 
   12747 		/* set bank to 0 in case flash read fails. */
   12748 		*bank = 0;
   12749 
   12750 		/* Check bank 0 */
   12751 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12752 		if (rv != 0)
   12753 			return rv;
   12754 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12755 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12756 			*bank = 0;
   12757 			return 0;
   12758 		}
   12759 
   12760 		/* Check bank 1 */
   12761 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   12762 		    &nvm_dword);
   12763 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12764 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12765 			*bank = 1;
   12766 			return 0;
   12767 		}
   12768 		aprint_error_dev(sc->sc_dev,
   12769 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12770 		return -1;
   12771 	case WM_T_ICH8:
   12772 	case WM_T_ICH9:
   12773 		eecd = CSR_READ(sc, WMREG_EECD);
   12774 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12775 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12776 			return 0;
   12777 		}
   12778 		/* FALLTHROUGH */
   12779 	default:
   12780 		/* Default to 0 */
   12781 		*bank = 0;
   12782 
   12783 		/* Check bank 0 */
   12784 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12785 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12786 			*bank = 0;
   12787 			return 0;
   12788 		}
   12789 
   12790 		/* Check bank 1 */
   12791 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12792 		    &sig_byte);
   12793 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12794 			*bank = 1;
   12795 			return 0;
   12796 		}
   12797 	}
   12798 
   12799 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12800 		device_xname(sc->sc_dev)));
   12801 	return -1;
   12802 }
   12803 
   12804 /******************************************************************************
   12805  * This function does initial flash setup so that a new read/write/erase cycle
   12806  * can be started.
   12807  *
   12808  * sc - The pointer to the hw structure
   12809  ****************************************************************************/
   12810 static int32_t
   12811 wm_ich8_cycle_init(struct wm_softc *sc)
   12812 {
   12813 	uint16_t hsfsts;
   12814 	int32_t error = 1;
   12815 	int32_t i     = 0;
   12816 
   12817 	if (sc->sc_type >= WM_T_PCH_SPT)
   12818 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12819 	else
   12820 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12821 
    12822 	/* Check the Flash Descriptor Valid bit in the HW status */
   12823 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12824 		return error;
   12825 
    12826 	/* Clear FCERR and DAEL in the HW status by writing 1s */
   12828 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12829 
   12830 	if (sc->sc_type >= WM_T_PCH_SPT)
   12831 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12832 	else
   12833 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12834 
    12835 	/*
    12836 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12837 	 * check against in order to start a new cycle, or the FDONE bit
    12838 	 * should be changed in the hardware so that it is 1 after a
    12839 	 * hardware reset, which could then be used to tell whether a cycle
    12840 	 * is in progress or has been completed.  We should also have some
    12841 	 * software semaphore mechanism to guard FDONE or the
    12842 	 * cycle-in-progress bit so that accesses by two threads are
    12843 	 * serialized and two threads don't start a cycle at the same time.
    12844 	 */
   12845 
   12846 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12847 		/*
   12848 		 * There is no cycle running at present, so we can start a
   12849 		 * cycle
   12850 		 */
   12851 
   12852 		/* Begin by setting Flash Cycle Done. */
   12853 		hsfsts |= HSFSTS_DONE;
   12854 		if (sc->sc_type >= WM_T_PCH_SPT)
   12855 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12856 			    hsfsts & 0xffffUL);
   12857 		else
   12858 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12859 		error = 0;
   12860 	} else {
   12861 		/*
    12862 		 * Otherwise, poll for some time so the current cycle has a
   12863 		 * chance to end before giving up.
   12864 		 */
   12865 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12866 			if (sc->sc_type >= WM_T_PCH_SPT)
   12867 				hsfsts = ICH8_FLASH_READ32(sc,
   12868 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12869 			else
   12870 				hsfsts = ICH8_FLASH_READ16(sc,
   12871 				    ICH_FLASH_HSFSTS);
   12872 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12873 				error = 0;
   12874 				break;
   12875 			}
   12876 			delay(1);
   12877 		}
   12878 		if (error == 0) {
   12879 			/*
    12880 			 * The previous cycle ended within the timeout;
    12881 			 * now set the Flash Cycle Done bit.
   12882 			 */
   12883 			hsfsts |= HSFSTS_DONE;
   12884 			if (sc->sc_type >= WM_T_PCH_SPT)
   12885 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12886 				    hsfsts & 0xffffUL);
   12887 			else
   12888 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12889 				    hsfsts);
   12890 		}
   12891 	}
   12892 	return error;
   12893 }
   12894 
   12895 /******************************************************************************
   12896  * This function starts a flash cycle and waits for its completion
   12897  *
   12898  * sc - The pointer to the hw structure
   12899  ****************************************************************************/
   12900 static int32_t
   12901 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12902 {
   12903 	uint16_t hsflctl;
   12904 	uint16_t hsfsts;
   12905 	int32_t error = 1;
   12906 	uint32_t i = 0;
   12907 
   12908 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12909 	if (sc->sc_type >= WM_T_PCH_SPT)
   12910 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12911 	else
   12912 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12913 	hsflctl |= HSFCTL_GO;
   12914 	if (sc->sc_type >= WM_T_PCH_SPT)
   12915 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12916 		    (uint32_t)hsflctl << 16);
   12917 	else
   12918 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12919 
   12920 	/* Wait till FDONE bit is set to 1 */
   12921 	do {
   12922 		if (sc->sc_type >= WM_T_PCH_SPT)
   12923 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12924 			    & 0xffffUL;
   12925 		else
   12926 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12927 		if (hsfsts & HSFSTS_DONE)
   12928 			break;
   12929 		delay(1);
   12930 		i++;
   12931 	} while (i < timeout);
    12932 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12933 		error = 0;
   12934 
   12935 	return error;
   12936 }
   12937 
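/*
 * Annotation: wm_ich8_cycle_init() and wm_ich8_flash_cycle() are always
 * used as a pair, bracketing the programming of the cycle parameters
 * and the flash address.  A sketch of the calling pattern, as used by
 * wm_read_ich8_data() below:
 */
#if 0
	if (wm_ich8_cycle_init(sc) == 0) {
		/* ... set cycle type and byte count in HSFCTL ... */
		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
	}
#endif
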
   12938 /******************************************************************************
   12939  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12940  *
   12941  * sc - The pointer to the hw structure
    12942  * index - The index of the byte, word or dword to read.
    12943  * size - Size of data to read, 1=byte, 2=word, 4=dword
    12944  * data - Pointer to the dword to store the value read.
   12945  *****************************************************************************/
   12946 static int32_t
   12947 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12948     uint32_t size, uint32_t *data)
   12949 {
   12950 	uint16_t hsfsts;
   12951 	uint16_t hsflctl;
   12952 	uint32_t flash_linear_address;
   12953 	uint32_t flash_data = 0;
   12954 	int32_t error = 1;
   12955 	int32_t count = 0;
   12956 
    12957 	if (size < 1 || size > 4 || data == NULL ||
   12958 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12959 		return error;
   12960 
   12961 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12962 	    sc->sc_ich8_flash_base;
   12963 
   12964 	do {
   12965 		delay(1);
   12966 		/* Steps */
   12967 		error = wm_ich8_cycle_init(sc);
   12968 		if (error)
   12969 			break;
   12970 
   12971 		if (sc->sc_type >= WM_T_PCH_SPT)
   12972 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12973 			    >> 16;
   12974 		else
   12975 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12976 		/* The BCOUNT field is size - 1: 0=1 byte, 1=2 bytes, 3=4 bytes */
   12977 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12978 		    & HSFCTL_BCOUNT_MASK;
   12979 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12980 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12981 			/*
    12982 			 * In SPT, this register is in LAN memory space, not
   12983 			 * flash. Therefore, only 32 bit access is supported.
   12984 			 */
   12985 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12986 			    (uint32_t)hsflctl << 16);
   12987 		} else
   12988 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12989 
   12990 		/*
   12991 		 * Write the last 24 bits of index into Flash Linear address
   12992 		 * field in Flash Address
   12993 		 */
    12994 		/* TODO: maybe check the index against the size of the flash */
   12995 
   12996 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12997 
   12998 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12999 
    13000 		/*
    13001 		 * If FCERR is set to 1, clear it and retry the whole
    13002 		 * sequence a few more times; otherwise read in (shift in)
    13003 		 * the Flash Data0 register, least significant byte
    13004 		 * first.
    13005 		 */
   13006 		if (error == 0) {
   13007 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13008 			if (size == 1)
   13009 				*data = (uint8_t)(flash_data & 0x000000FF);
   13010 			else if (size == 2)
   13011 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13012 			else if (size == 4)
   13013 				*data = (uint32_t)flash_data;
   13014 			break;
   13015 		} else {
   13016 			/*
   13017 			 * If we've gotten here, then things are probably
   13018 			 * completely hosed, but if the error condition is
   13019 			 * detected, it won't hurt to give it another try...
   13020 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13021 			 */
   13022 			if (sc->sc_type >= WM_T_PCH_SPT)
   13023 				hsfsts = ICH8_FLASH_READ32(sc,
   13024 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13025 			else
   13026 				hsfsts = ICH8_FLASH_READ16(sc,
   13027 				    ICH_FLASH_HSFSTS);
   13028 
   13029 			if (hsfsts & HSFSTS_ERR) {
   13030 				/* Repeat for some time before giving up. */
   13031 				continue;
   13032 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13033 				break;
   13034 		}
   13035 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13036 
   13037 	return error;
   13038 }
   13039 
   13040 /******************************************************************************
   13041  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13042  *
   13043  * sc - pointer to wm_hw structure
   13044  * index - The index of the byte to read.
   13045  * data - Pointer to a byte to store the value read.
   13046  *****************************************************************************/
   13047 static int32_t
   13048 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13049 {
   13050 	int32_t status;
   13051 	uint32_t word = 0;
   13052 
   13053 	status = wm_read_ich8_data(sc, index, 1, &word);
   13054 	if (status == 0)
   13055 		*data = (uint8_t)word;
   13056 	else
   13057 		*data = 0;
   13058 
   13059 	return status;
   13060 }
   13061 
   13062 /******************************************************************************
   13063  * Reads a word from the NVM using the ICH8 flash access registers.
   13064  *
   13065  * sc - pointer to wm_hw structure
   13066  * index - The starting byte index of the word to read.
   13067  * data - Pointer to a word to store the value read.
   13068  *****************************************************************************/
   13069 static int32_t
   13070 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13071 {
   13072 	int32_t status;
   13073 	uint32_t word = 0;
   13074 
   13075 	status = wm_read_ich8_data(sc, index, 2, &word);
   13076 	if (status == 0)
   13077 		*data = (uint16_t)word;
   13078 	else
   13079 		*data = 0;
   13080 
   13081 	return status;
   13082 }
   13083 
   13084 /******************************************************************************
   13085  * Reads a dword from the NVM using the ICH8 flash access registers.
   13086  *
   13087  * sc - pointer to wm_hw structure
    13088  * index - The starting byte index of the dword to read.
    13089  * data - Pointer to a dword to store the value read.
   13090  *****************************************************************************/
   13091 static int32_t
   13092 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13093 {
   13094 	int32_t status;
   13095 
   13096 	status = wm_read_ich8_data(sc, index, 4, data);
   13097 	return status;
   13098 }
   13099 
   13100 /******************************************************************************
   13101  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13102  * register.
   13103  *
   13104  * sc - Struct containing variables accessed by shared code
   13105  * offset - offset of word in the EEPROM to read
   13106  * data - word read from the EEPROM
   13107  * words - number of words to read
   13108  *****************************************************************************/
   13109 static int
   13110 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13111 {
   13112 	int32_t	 rv = 0;
   13113 	uint32_t flash_bank = 0;
   13114 	uint32_t act_offset = 0;
   13115 	uint32_t bank_offset = 0;
   13116 	uint16_t word = 0;
   13117 	uint16_t i = 0;
   13118 
   13119 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13120 		device_xname(sc->sc_dev), __func__));
   13121 
   13122 	if (sc->nvm.acquire(sc) != 0)
   13123 		return -1;
   13124 
   13125 	/*
    13126 	 * We need to know which flash bank is valid.  In the event
   13127 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13128 	 * managing flash_bank. So it cannot be trusted and needs
   13129 	 * to be updated with each read.
   13130 	 */
   13131 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13132 	if (rv) {
   13133 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13134 			device_xname(sc->sc_dev)));
   13135 		flash_bank = 0;
   13136 	}
   13137 
   13138 	/*
   13139 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13140 	 * size
   13141 	 */
   13142 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13143 
   13144 	for (i = 0; i < words; i++) {
   13145 		/* The NVM part needs a byte offset, hence * 2 */
   13146 		act_offset = bank_offset + ((offset + i) * 2);
   13147 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13148 		if (rv) {
   13149 			aprint_error_dev(sc->sc_dev,
   13150 			    "%s: failed to read NVM\n", __func__);
   13151 			break;
   13152 		}
   13153 		data[i] = word;
   13154 	}
   13155 
   13156 	sc->nvm.release(sc);
   13157 	return rv;
   13158 }
   13159 
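/*
 * Annotation: a worked example of the offset arithmetic above, assuming
 * a flash bank size of 0x1000 words.  Reading word offset 0x10 from
 * flash bank 1 gives:
 *
 *	bank_offset = 1 * (0x1000 * 2)  = 0x2000	(bank size in bytes)
 *	act_offset  = 0x2000 + 0x10 * 2 = 0x2020	(byte address)
 *
 * Word offsets are doubled because the ICH8 flash access routines take
 * byte indices.
 */
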
   13160 /******************************************************************************
   13161  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13162  * register.
   13163  *
   13164  * sc - Struct containing variables accessed by shared code
   13165  * offset - offset of word in the EEPROM to read
   13166  * data - word read from the EEPROM
   13167  * words - number of words to read
   13168  *****************************************************************************/
   13169 static int
   13170 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13171 {
   13172 	int32_t	 rv = 0;
   13173 	uint32_t flash_bank = 0;
   13174 	uint32_t act_offset = 0;
   13175 	uint32_t bank_offset = 0;
   13176 	uint32_t dword = 0;
   13177 	uint16_t i = 0;
   13178 
   13179 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13180 		device_xname(sc->sc_dev), __func__));
   13181 
   13182 	if (sc->nvm.acquire(sc) != 0)
   13183 		return -1;
   13184 
   13185 	/*
    13186 	 * We need to know which flash bank is valid.  In the event
   13187 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13188 	 * managing flash_bank. So it cannot be trusted and needs
   13189 	 * to be updated with each read.
   13190 	 */
   13191 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13192 	if (rv) {
   13193 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13194 			device_xname(sc->sc_dev)));
   13195 		flash_bank = 0;
   13196 	}
   13197 
   13198 	/*
   13199 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13200 	 * size
   13201 	 */
   13202 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13203 
   13204 	for (i = 0; i < words; i++) {
   13205 		/* The NVM part needs a byte offset, hence * 2 */
   13206 		act_offset = bank_offset + ((offset + i) * 2);
   13207 		/* but we must read dword aligned, so mask ... */
   13208 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13209 		if (rv) {
   13210 			aprint_error_dev(sc->sc_dev,
   13211 			    "%s: failed to read NVM\n", __func__);
   13212 			break;
   13213 		}
   13214 		/* ... and pick out low or high word */
   13215 		if ((act_offset & 0x2) == 0)
   13216 			data[i] = (uint16_t)(dword & 0xFFFF);
   13217 		else
   13218 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13219 	}
   13220 
   13221 	sc->nvm.release(sc);
   13222 	return rv;
   13223 }
   13224 
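/*
 * Annotation: SPT flash reads must be 32-bit aligned, hence the
 * "& ~0x3" and the word picking above.  Continuing the example from
 * wm_nvm_read_ich8(), byte address 0x2022 is read as the dword at
 * 0x2020 and the upper 16 bits are returned, while byte address 0x2020
 * itself would return the lower 16 bits.
 */
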
   13225 /* iNVM */
   13226 
   13227 static int
   13228 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13229 {
    13230 	int32_t	 rv = -1;	/* Not found by default */
   13231 	uint32_t invm_dword;
   13232 	uint16_t i;
   13233 	uint8_t record_type, word_address;
   13234 
   13235 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13236 		device_xname(sc->sc_dev), __func__));
   13237 
   13238 	for (i = 0; i < INVM_SIZE; i++) {
   13239 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13240 		/* Get record type */
   13241 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13242 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13243 			break;
   13244 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13245 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13246 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13247 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13248 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13249 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13250 			if (word_address == address) {
   13251 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13252 				rv = 0;
   13253 				break;
   13254 			}
   13255 		}
   13256 	}
   13257 
   13258 	return rv;
   13259 }
   13260 
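/*
 * Annotation: the iNVM (integrated NVM of the I210/I211) is scanned as
 * a sequence of 32-bit records.  Each dword carries a record type;
 * CSR-autoload and RSA-key records are followed by extra data dwords,
 * which is why the loop above advances i past them, while a
 * word-autoload record packs both the word address and the word data
 * into the single dword.
 */
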
   13261 static int
   13262 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13263 {
   13264 	int rv = 0;
   13265 	int i;
   13266 
   13267 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13268 		device_xname(sc->sc_dev), __func__));
   13269 
   13270 	if (sc->nvm.acquire(sc) != 0)
   13271 		return -1;
   13272 
   13273 	for (i = 0; i < words; i++) {
   13274 		switch (offset + i) {
   13275 		case NVM_OFF_MACADDR:
   13276 		case NVM_OFF_MACADDR1:
   13277 		case NVM_OFF_MACADDR2:
   13278 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13279 			if (rv != 0) {
   13280 				data[i] = 0xffff;
   13281 				rv = -1;
   13282 			}
   13283 			break;
   13284 		case NVM_OFF_CFG2:
   13285 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13286 			if (rv != 0) {
   13287 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13288 				rv = 0;
   13289 			}
   13290 			break;
   13291 		case NVM_OFF_CFG4:
   13292 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13293 			if (rv != 0) {
   13294 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13295 				rv = 0;
   13296 			}
   13297 			break;
   13298 		case NVM_OFF_LED_1_CFG:
   13299 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13300 			if (rv != 0) {
   13301 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13302 				rv = 0;
   13303 			}
   13304 			break;
   13305 		case NVM_OFF_LED_0_2_CFG:
   13306 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13307 			if (rv != 0) {
   13308 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13309 				rv = 0;
   13310 			}
   13311 			break;
   13312 		case NVM_OFF_ID_LED_SETTINGS:
   13313 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13314 			if (rv != 0) {
   13315 				*data = ID_LED_RESERVED_FFFF;
   13316 				rv = 0;
   13317 			}
   13318 			break;
   13319 		default:
   13320 			DPRINTF(WM_DEBUG_NVM,
   13321 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13322 			*data = NVM_RESERVED_WORD;
   13323 			break;
   13324 		}
   13325 	}
   13326 
   13327 	sc->nvm.release(sc);
   13328 	return rv;
   13329 }
   13330 
   13331 /* Lock, detecting NVM type, validate checksum, version and read */
   13332 
   13333 static int
   13334 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13335 {
   13336 	uint32_t eecd = 0;
   13337 
   13338 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13339 	    || sc->sc_type == WM_T_82583) {
   13340 		eecd = CSR_READ(sc, WMREG_EECD);
   13341 
   13342 		/* Isolate bits 15 & 16 */
   13343 		eecd = ((eecd >> 15) & 0x03);
   13344 
   13345 		/* If both bits are set, device is Flash type */
   13346 		if (eecd == 0x03)
   13347 			return 0;
   13348 	}
   13349 	return 1;
   13350 }
   13351 
   13352 static int
   13353 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13354 {
   13355 	uint32_t eec;
   13356 
   13357 	eec = CSR_READ(sc, WMREG_EEC);
   13358 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13359 		return 1;
   13360 
   13361 	return 0;
   13362 }
   13363 
   13364 /*
   13365  * wm_nvm_validate_checksum
   13366  *
   13367  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13368  */
   13369 static int
   13370 wm_nvm_validate_checksum(struct wm_softc *sc)
   13371 {
   13372 	uint16_t checksum;
   13373 	uint16_t eeprom_data;
   13374 #ifdef WM_DEBUG
   13375 	uint16_t csum_wordaddr, valid_checksum;
   13376 #endif
   13377 	int i;
   13378 
   13379 	checksum = 0;
   13380 
   13381 	/* Don't check for I211 */
   13382 	if (sc->sc_type == WM_T_I211)
   13383 		return 0;
   13384 
   13385 #ifdef WM_DEBUG
   13386 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13387 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13388 		csum_wordaddr = NVM_OFF_COMPAT;
   13389 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13390 	} else {
   13391 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13392 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13393 	}
   13394 
   13395 	/* Dump EEPROM image for debug */
   13396 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13397 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13398 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13399 		/* XXX PCH_SPT? */
   13400 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13401 		if ((eeprom_data & valid_checksum) == 0)
   13402 			DPRINTF(WM_DEBUG_NVM,
    13403 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   13404 				device_xname(sc->sc_dev), eeprom_data,
   13405 				    valid_checksum));
   13406 	}
   13407 
   13408 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13409 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13410 		for (i = 0; i < NVM_SIZE; i++) {
   13411 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13412 				printf("XXXX ");
   13413 			else
   13414 				printf("%04hx ", eeprom_data);
   13415 			if (i % 8 == 7)
   13416 				printf("\n");
   13417 		}
   13418 	}
   13419 
   13420 #endif /* WM_DEBUG */
   13421 
   13422 	for (i = 0; i < NVM_SIZE; i++) {
   13423 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13424 			return 1;
   13425 		checksum += eeprom_data;
   13426 	}
   13427 
   13428 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13429 #ifdef WM_DEBUG
   13430 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13431 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13432 #endif
   13433 	}
   13434 
   13435 	return 0;
   13436 }
   13437 
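/*
 * Annotation: a minimal sketch of the rule being checked above.  The
 * image is programmed so that the 16-bit sum of the first NVM_SIZE
 * words equals NVM_CHECKSUM; equivalently (variable names hypothetical):
 */
#if 0
	uint16_t sum = 0, word;
	int j;

	for (j = 0; j < NVM_SIZE; j++) {
		if (wm_nvm_read(sc, j, 1, &word) != 0)
			break;
		sum += word;		/* Wraps modulo 2^16 */
	}
	/* A valid image satisfies: sum == (uint16_t)NVM_CHECKSUM */
#endif
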
   13438 static void
   13439 wm_nvm_version_invm(struct wm_softc *sc)
   13440 {
   13441 	uint32_t dword;
   13442 
   13443 	/*
    13444 	 * Linux's code to decode the version is very strange, so we don't
    13445 	 * follow that algorithm and just use word 61 as the documentation
    13446 	 * describes.  It may not be perfect though...
   13447 	 *
   13448 	 * Example:
   13449 	 *
   13450 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13451 	 */
   13452 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13453 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13454 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13455 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13456 }
   13457 
   13458 static void
   13459 wm_nvm_version(struct wm_softc *sc)
   13460 {
   13461 	uint16_t major, minor, build, patch;
   13462 	uint16_t uid0, uid1;
   13463 	uint16_t nvm_data;
   13464 	uint16_t off;
   13465 	bool check_version = false;
   13466 	bool check_optionrom = false;
   13467 	bool have_build = false;
   13468 	bool have_uid = true;
   13469 
   13470 	/*
   13471 	 * Version format:
   13472 	 *
   13473 	 * XYYZ
   13474 	 * X0YZ
   13475 	 * X0YY
   13476 	 *
   13477 	 * Example:
   13478 	 *
   13479 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13480 	 *	82571	0x50a6	5.10.6?
   13481 	 *	82572	0x506a	5.6.10?
   13482 	 *	82572EI	0x5069	5.6.9?
   13483 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13484 	 *		0x2013	2.1.3?
   13485 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13486 	 */
   13487 
   13488 	/*
   13489 	 * XXX
    13490 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    13491 	 * I've never seen such a small SPI ROM on real 82574 hardware.
   13492 	 */
   13493 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13494 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13495 		have_uid = false;
   13496 
   13497 	switch (sc->sc_type) {
   13498 	case WM_T_82571:
   13499 	case WM_T_82572:
   13500 	case WM_T_82574:
   13501 	case WM_T_82583:
   13502 		check_version = true;
   13503 		check_optionrom = true;
   13504 		have_build = true;
   13505 		break;
   13506 	case WM_T_82575:
   13507 	case WM_T_82576:
   13508 	case WM_T_82580:
   13509 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13510 			check_version = true;
   13511 		break;
   13512 	case WM_T_I211:
   13513 		wm_nvm_version_invm(sc);
   13514 		have_uid = false;
   13515 		goto printver;
   13516 	case WM_T_I210:
   13517 		if (!wm_nvm_flash_presence_i210(sc)) {
   13518 			wm_nvm_version_invm(sc);
   13519 			have_uid = false;
   13520 			goto printver;
   13521 		}
   13522 		/* FALLTHROUGH */
   13523 	case WM_T_I350:
   13524 	case WM_T_I354:
   13525 		check_version = true;
   13526 		check_optionrom = true;
   13527 		break;
   13528 	default:
   13529 		return;
   13530 	}
   13531 	if (check_version
   13532 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13533 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13534 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13535 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13536 			build = nvm_data & NVM_BUILD_MASK;
   13537 			have_build = true;
   13538 		} else
   13539 			minor = nvm_data & 0x00ff;
   13540 
   13541 		/* Decimal */
   13542 		minor = (minor / 16) * 10 + (minor % 16);
   13543 		sc->sc_nvm_ver_major = major;
   13544 		sc->sc_nvm_ver_minor = minor;
   13545 
   13546 printver:
   13547 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13548 		    sc->sc_nvm_ver_minor);
   13549 		if (have_build) {
   13550 			sc->sc_nvm_ver_build = build;
   13551 			aprint_verbose(".%d", build);
   13552 		}
   13553 	}
   13554 
    13555 	/* Assume the Option ROM area is above NVM_SIZE */
   13556 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13557 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13558 		/* Option ROM Version */
   13559 		if ((off != 0x0000) && (off != 0xffff)) {
   13560 			int rv;
   13561 
   13562 			off += NVM_COMBO_VER_OFF;
   13563 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13564 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13565 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13566 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13567 				/* 16bits */
   13568 				major = uid0 >> 8;
   13569 				build = (uid0 << 8) | (uid1 >> 8);
   13570 				patch = uid1 & 0x00ff;
   13571 				aprint_verbose(", option ROM Version %d.%d.%d",
   13572 				    major, build, patch);
   13573 			}
   13574 		}
   13575 	}
   13576 
   13577 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13578 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13579 }
   13580 
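/*
 * Annotation: a worked example of the decoding above, using the 82571
 * sample value 0x50a2 from the table.  Assuming the usual mask layout,
 * the major number is the top nibble (5), the build number the low
 * nibble (2), and the minor number the bits in between (0x0a).  The
 * minor is then converted digit-wise to decimal:
 *
 *	minor = (0x0a / 16) * 10 + (0x0a % 16) = 10
 *
 * giving version 5.10.2.
 */
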
   13581 /*
   13582  * wm_nvm_read:
   13583  *
   13584  *	Read data from the serial EEPROM.
   13585  */
   13586 static int
   13587 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13588 {
   13589 	int rv;
   13590 
   13591 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13592 		device_xname(sc->sc_dev), __func__));
   13593 
   13594 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13595 		return -1;
   13596 
   13597 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13598 
   13599 	return rv;
   13600 }
   13601 
   13602 /*
   13603  * Hardware semaphores.
    13604  * Very complex...
   13605  */
   13606 
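/*
 * Annotation: several distinct mechanisms appear below, selected per
 * chip family.  Sketching the map, as implemented by the functions
 * that follow:
 *
 *	- EECD EE_REQ/EE_GNT: request/grant handshake for raw EEPROM
 *	  access
 *	- SWSM SMBI + SWESMBI: the two-stage software/firmware hardware
 *	  semaphore
 *	- SW_FW_SYNC: per-resource SW and FW ownership bits, themselves
 *	  protected by the SWSM semaphore
 *	- EXTCNFCTR MDIO_SW_OWNERSHIP: ownership flag on ICH/PCH parts,
 *	  combined with software mutexes (sc_ich_phymtx/sc_ich_nvmmtx)
 */
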
   13607 static int
   13608 wm_get_null(struct wm_softc *sc)
   13609 {
   13610 
   13611 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13612 		device_xname(sc->sc_dev), __func__));
   13613 	return 0;
   13614 }
   13615 
   13616 static void
   13617 wm_put_null(struct wm_softc *sc)
   13618 {
   13619 
   13620 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13621 		device_xname(sc->sc_dev), __func__));
   13622 	return;
   13623 }
   13624 
   13625 static int
   13626 wm_get_eecd(struct wm_softc *sc)
   13627 {
   13628 	uint32_t reg;
   13629 	int x;
   13630 
   13631 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13632 		device_xname(sc->sc_dev), __func__));
   13633 
   13634 	reg = CSR_READ(sc, WMREG_EECD);
   13635 
   13636 	/* Request EEPROM access. */
   13637 	reg |= EECD_EE_REQ;
   13638 	CSR_WRITE(sc, WMREG_EECD, reg);
   13639 
   13640 	/* ..and wait for it to be granted. */
   13641 	for (x = 0; x < 1000; x++) {
   13642 		reg = CSR_READ(sc, WMREG_EECD);
   13643 		if (reg & EECD_EE_GNT)
   13644 			break;
   13645 		delay(5);
   13646 	}
   13647 	if ((reg & EECD_EE_GNT) == 0) {
   13648 		aprint_error_dev(sc->sc_dev,
   13649 		    "could not acquire EEPROM GNT\n");
   13650 		reg &= ~EECD_EE_REQ;
   13651 		CSR_WRITE(sc, WMREG_EECD, reg);
   13652 		return -1;
   13653 	}
   13654 
   13655 	return 0;
   13656 }
   13657 
   13658 static void
   13659 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13660 {
   13661 
   13662 	*eecd |= EECD_SK;
   13663 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13664 	CSR_WRITE_FLUSH(sc);
   13665 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13666 		delay(1);
   13667 	else
   13668 		delay(50);
   13669 }
   13670 
   13671 static void
   13672 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13673 {
   13674 
   13675 	*eecd &= ~EECD_SK;
   13676 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13677 	CSR_WRITE_FLUSH(sc);
   13678 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13679 		delay(1);
   13680 	else
   13681 		delay(50);
   13682 }
   13683 
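/*
 * Annotation: the two helpers above generate one serial clock pulse
 * each.  A minimal sketch of shifting a single bit out to the EEPROM
 * with them ("bit" is a hypothetical variable):
 */
#if 0
	uint32_t eecd = CSR_READ(sc, WMREG_EECD);

	if (bit)			/* Present the data bit on DI */
		eecd |= EECD_DI;
	else
		eecd &= ~EECD_DI;
	CSR_WRITE(sc, WMREG_EECD, eecd);
	CSR_WRITE_FLUSH(sc);
	wm_nvm_eec_clock_raise(sc, &eecd);	/* EEPROM samples DI on SK */
	wm_nvm_eec_clock_lower(sc, &eecd);
#endif
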
   13684 static void
   13685 wm_put_eecd(struct wm_softc *sc)
   13686 {
   13687 	uint32_t reg;
   13688 
   13689 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13690 		device_xname(sc->sc_dev), __func__));
   13691 
   13692 	/* Stop nvm */
   13693 	reg = CSR_READ(sc, WMREG_EECD);
   13694 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13695 		/* Pull CS high */
   13696 		reg |= EECD_CS;
   13697 		wm_nvm_eec_clock_lower(sc, &reg);
   13698 	} else {
   13699 		/* CS on Microwire is active-high */
   13700 		reg &= ~(EECD_CS | EECD_DI);
   13701 		CSR_WRITE(sc, WMREG_EECD, reg);
   13702 		wm_nvm_eec_clock_raise(sc, &reg);
   13703 		wm_nvm_eec_clock_lower(sc, &reg);
   13704 	}
   13705 
   13706 	reg = CSR_READ(sc, WMREG_EECD);
   13707 	reg &= ~EECD_EE_REQ;
   13708 	CSR_WRITE(sc, WMREG_EECD, reg);
   13709 
   13710 	return;
   13711 }
   13712 
   13713 /*
   13714  * Get hardware semaphore.
   13715  * Same as e1000_get_hw_semaphore_generic()
   13716  */
   13717 static int
   13718 wm_get_swsm_semaphore(struct wm_softc *sc)
   13719 {
   13720 	int32_t timeout;
   13721 	uint32_t swsm;
   13722 
   13723 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13724 		device_xname(sc->sc_dev), __func__));
   13725 	KASSERT(sc->sc_nvm_wordsize > 0);
   13726 
   13727 retry:
   13728 	/* Get the SW semaphore. */
   13729 	timeout = sc->sc_nvm_wordsize + 1;
   13730 	while (timeout) {
   13731 		swsm = CSR_READ(sc, WMREG_SWSM);
   13732 
   13733 		if ((swsm & SWSM_SMBI) == 0)
   13734 			break;
   13735 
   13736 		delay(50);
   13737 		timeout--;
   13738 	}
   13739 
   13740 	if (timeout == 0) {
   13741 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13742 			/*
   13743 			 * In rare circumstances, the SW semaphore may already
   13744 			 * be held unintentionally. Clear the semaphore once
   13745 			 * before giving up.
   13746 			 */
   13747 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13748 			wm_put_swsm_semaphore(sc);
   13749 			goto retry;
   13750 		}
   13751 		aprint_error_dev(sc->sc_dev,
   13752 		    "could not acquire SWSM SMBI\n");
   13753 		return 1;
   13754 	}
   13755 
   13756 	/* Get the FW semaphore. */
   13757 	timeout = sc->sc_nvm_wordsize + 1;
   13758 	while (timeout) {
   13759 		swsm = CSR_READ(sc, WMREG_SWSM);
   13760 		swsm |= SWSM_SWESMBI;
   13761 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13762 		/* If we managed to set the bit we got the semaphore. */
   13763 		swsm = CSR_READ(sc, WMREG_SWSM);
   13764 		if (swsm & SWSM_SWESMBI)
   13765 			break;
   13766 
   13767 		delay(50);
   13768 		timeout--;
   13769 	}
   13770 
   13771 	if (timeout == 0) {
   13772 		aprint_error_dev(sc->sc_dev,
   13773 		    "could not acquire SWSM SWESMBI\n");
   13774 		/* Release semaphores */
   13775 		wm_put_swsm_semaphore(sc);
   13776 		return 1;
   13777 	}
   13778 	return 0;
   13779 }
   13780 
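/*
 * Annotation: SMBI arbitrates between software agents and SWESMBI
 * between software and firmware; both must be obtained.  The usual
 * calling pattern is simply (a sketch of what the callers below do):
 */
#if 0
	if (wm_get_swsm_semaphore(sc) != 0)
		return 1;		/* Could not lock the resource */
	/* ... access the shared resource ... */
	wm_put_swsm_semaphore(sc);
#endif
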
   13781 /*
   13782  * Put hardware semaphore.
   13783  * Same as e1000_put_hw_semaphore_generic()
   13784  */
   13785 static void
   13786 wm_put_swsm_semaphore(struct wm_softc *sc)
   13787 {
   13788 	uint32_t swsm;
   13789 
   13790 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13791 		device_xname(sc->sc_dev), __func__));
   13792 
   13793 	swsm = CSR_READ(sc, WMREG_SWSM);
   13794 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13795 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13796 }
   13797 
   13798 /*
   13799  * Get SW/FW semaphore.
   13800  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13801  */
   13802 static int
   13803 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13804 {
   13805 	uint32_t swfw_sync;
   13806 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13807 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13808 	int timeout;
   13809 
   13810 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13811 		device_xname(sc->sc_dev), __func__));
   13812 
   13813 	if (sc->sc_type == WM_T_80003)
   13814 		timeout = 50;
   13815 	else
   13816 		timeout = 200;
   13817 
   13818 	while (timeout) {
   13819 		if (wm_get_swsm_semaphore(sc)) {
   13820 			aprint_error_dev(sc->sc_dev,
   13821 			    "%s: failed to get semaphore\n",
   13822 			    __func__);
   13823 			return 1;
   13824 		}
   13825 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13826 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13827 			swfw_sync |= swmask;
   13828 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13829 			wm_put_swsm_semaphore(sc);
   13830 			return 0;
   13831 		}
   13832 		wm_put_swsm_semaphore(sc);
   13833 		delay(5000);
   13834 		timeout--;
   13835 	}
   13836 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13837 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13838 	return 1;
   13839 }
   13840 
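/*
 * Annotation: each shared resource owns one SW bit and one FW bit in
 * SW_FW_SYNC.  For a given resource mask (e.g. SWFW_EEP_SM), the code
 * above claims the SW bit (mask << SWFW_SOFT_SHIFT) only after checking
 * that neither it nor the FW bit (mask << SWFW_FIRM_SHIFT) is already
 * set, and the whole read-modify-write of SW_FW_SYNC is done while
 * holding the SWSM semaphore.
 */
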
   13841 static void
   13842 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13843 {
   13844 	uint32_t swfw_sync;
   13845 
   13846 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13847 		device_xname(sc->sc_dev), __func__));
   13848 
   13849 	while (wm_get_swsm_semaphore(sc) != 0)
   13850 		continue;
   13851 
   13852 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13853 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13854 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13855 
   13856 	wm_put_swsm_semaphore(sc);
   13857 }
   13858 
   13859 static int
   13860 wm_get_nvm_80003(struct wm_softc *sc)
   13861 {
   13862 	int rv;
   13863 
   13864 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13865 		device_xname(sc->sc_dev), __func__));
   13866 
   13867 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13868 		aprint_error_dev(sc->sc_dev,
   13869 		    "%s: failed to get semaphore(SWFW)\n",
   13870 		    __func__);
   13871 		return rv;
   13872 	}
   13873 
   13874 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13875 	    && (rv = wm_get_eecd(sc)) != 0) {
   13876 		aprint_error_dev(sc->sc_dev,
   13877 		    "%s: failed to get semaphore(EECD)\n",
   13878 		    __func__);
   13879 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13880 		return rv;
   13881 	}
   13882 
   13883 	return 0;
   13884 }
   13885 
   13886 static void
   13887 wm_put_nvm_80003(struct wm_softc *sc)
   13888 {
   13889 
   13890 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13891 		device_xname(sc->sc_dev), __func__));
   13892 
   13893 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13894 		wm_put_eecd(sc);
   13895 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13896 }
   13897 
   13898 static int
   13899 wm_get_nvm_82571(struct wm_softc *sc)
   13900 {
   13901 	int rv;
   13902 
   13903 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13904 		device_xname(sc->sc_dev), __func__));
   13905 
   13906 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13907 		return rv;
   13908 
   13909 	switch (sc->sc_type) {
   13910 	case WM_T_82573:
   13911 		break;
   13912 	default:
   13913 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13914 			rv = wm_get_eecd(sc);
   13915 		break;
   13916 	}
   13917 
   13918 	if (rv != 0) {
   13919 		aprint_error_dev(sc->sc_dev,
   13920 		    "%s: failed to get semaphore\n",
   13921 		    __func__);
   13922 		wm_put_swsm_semaphore(sc);
   13923 	}
   13924 
   13925 	return rv;
   13926 }
   13927 
   13928 static void
   13929 wm_put_nvm_82571(struct wm_softc *sc)
   13930 {
   13931 
   13932 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13933 		device_xname(sc->sc_dev), __func__));
   13934 
   13935 	switch (sc->sc_type) {
   13936 	case WM_T_82573:
   13937 		break;
   13938 	default:
   13939 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13940 			wm_put_eecd(sc);
   13941 		break;
   13942 	}
   13943 
   13944 	wm_put_swsm_semaphore(sc);
   13945 }
   13946 
   13947 static int
   13948 wm_get_phy_82575(struct wm_softc *sc)
   13949 {
   13950 
   13951 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13952 		device_xname(sc->sc_dev), __func__));
   13953 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13954 }
   13955 
   13956 static void
   13957 wm_put_phy_82575(struct wm_softc *sc)
   13958 {
   13959 
   13960 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13961 		device_xname(sc->sc_dev), __func__));
   13962 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13963 }
   13964 
   13965 static int
   13966 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13967 {
   13968 	uint32_t ext_ctrl;
   13969 	int timeout = 200;
   13970 
   13971 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13972 		device_xname(sc->sc_dev), __func__));
   13973 
   13974 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13975 	for (timeout = 0; timeout < 200; timeout++) {
   13976 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13977 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13978 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13979 
   13980 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13981 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13982 			return 0;
   13983 		delay(5000);
   13984 	}
   13985 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13986 	    device_xname(sc->sc_dev), ext_ctrl);
   13987 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13988 	return 1;
   13989 }
   13990 
   13991 static void
   13992 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13993 {
   13994 	uint32_t ext_ctrl;
   13995 
   13996 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13997 		device_xname(sc->sc_dev), __func__));
   13998 
   13999 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14000 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14001 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14002 
   14003 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14004 }
   14005 
   14006 static int
   14007 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14008 {
   14009 	uint32_t ext_ctrl;
   14010 	int timeout;
   14011 
   14012 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14013 		device_xname(sc->sc_dev), __func__));
   14014 	mutex_enter(sc->sc_ich_phymtx);
   14015 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14016 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14017 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14018 			break;
   14019 		delay(1000);
   14020 	}
   14021 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14022 		printf("%s: SW has already locked the resource\n",
   14023 		    device_xname(sc->sc_dev));
   14024 		goto out;
   14025 	}
   14026 
   14027 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14028 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14029 	for (timeout = 0; timeout < 1000; timeout++) {
   14030 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14031 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14032 			break;
   14033 		delay(1000);
   14034 	}
   14035 	if (timeout >= 1000) {
   14036 		printf("%s: failed to acquire semaphore\n",
   14037 		    device_xname(sc->sc_dev));
   14038 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14039 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14040 		goto out;
   14041 	}
   14042 	return 0;
   14043 
   14044 out:
   14045 	mutex_exit(sc->sc_ich_phymtx);
   14046 	return 1;
   14047 }
   14048 
   14049 static void
   14050 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14051 {
   14052 	uint32_t ext_ctrl;
   14053 
   14054 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14055 		device_xname(sc->sc_dev), __func__));
   14056 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14057 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14058 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14059 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14060 	} else {
   14061 		printf("%s: Semaphore unexpectedly released\n",
   14062 		    device_xname(sc->sc_dev));
   14063 	}
   14064 
   14065 	mutex_exit(sc->sc_ich_phymtx);
   14066 }
   14067 
   14068 static int
   14069 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14070 {
   14071 
   14072 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14073 		device_xname(sc->sc_dev), __func__));
   14074 	mutex_enter(sc->sc_ich_nvmmtx);
   14075 
   14076 	return 0;
   14077 }
   14078 
   14079 static void
   14080 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14081 {
   14082 
   14083 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14084 		device_xname(sc->sc_dev), __func__));
   14085 	mutex_exit(sc->sc_ich_nvmmtx);
   14086 }
   14087 
   14088 static int
   14089 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14090 {
   14091 	int i = 0;
   14092 	uint32_t reg;
   14093 
   14094 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14095 		device_xname(sc->sc_dev), __func__));
   14096 
   14097 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14098 	do {
   14099 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14100 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14101 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14102 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14103 			break;
   14104 		delay(2*1000);
   14105 		i++;
   14106 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14107 
   14108 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14109 		wm_put_hw_semaphore_82573(sc);
   14110 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14111 		    device_xname(sc->sc_dev));
   14112 		return -1;
   14113 	}
   14114 
   14115 	return 0;
   14116 }
   14117 
   14118 static void
   14119 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14120 {
   14121 	uint32_t reg;
   14122 
   14123 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14124 		device_xname(sc->sc_dev), __func__));
   14125 
   14126 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14127 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14128 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14129 }
   14130 
   14131 /*
   14132  * Management mode and power management related subroutines.
   14133  * BMC, AMT, suspend/resume and EEE.
   14134  */
   14135 
   14136 #ifdef WM_WOL
   14137 static int
   14138 wm_check_mng_mode(struct wm_softc *sc)
   14139 {
   14140 	int rv;
   14141 
   14142 	switch (sc->sc_type) {
   14143 	case WM_T_ICH8:
   14144 	case WM_T_ICH9:
   14145 	case WM_T_ICH10:
   14146 	case WM_T_PCH:
   14147 	case WM_T_PCH2:
   14148 	case WM_T_PCH_LPT:
   14149 	case WM_T_PCH_SPT:
   14150 	case WM_T_PCH_CNP:
   14151 		rv = wm_check_mng_mode_ich8lan(sc);
   14152 		break;
   14153 	case WM_T_82574:
   14154 	case WM_T_82583:
   14155 		rv = wm_check_mng_mode_82574(sc);
   14156 		break;
   14157 	case WM_T_82571:
   14158 	case WM_T_82572:
   14159 	case WM_T_82573:
   14160 	case WM_T_80003:
   14161 		rv = wm_check_mng_mode_generic(sc);
   14162 		break;
   14163 	default:
    14164 		/* Nothing to do */
   14165 		rv = 0;
   14166 		break;
   14167 	}
   14168 
   14169 	return rv;
   14170 }
   14171 
   14172 static int
   14173 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14174 {
   14175 	uint32_t fwsm;
   14176 
   14177 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14178 
   14179 	if (((fwsm & FWSM_FW_VALID) != 0)
   14180 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14181 		return 1;
   14182 
   14183 	return 0;
   14184 }
   14185 
   14186 static int
   14187 wm_check_mng_mode_82574(struct wm_softc *sc)
   14188 {
   14189 	uint16_t data;
   14190 
   14191 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14192 
   14193 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14194 		return 1;
   14195 
   14196 	return 0;
   14197 }
   14198 
   14199 static int
   14200 wm_check_mng_mode_generic(struct wm_softc *sc)
   14201 {
   14202 	uint32_t fwsm;
   14203 
   14204 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14205 
   14206 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14207 		return 1;
   14208 
   14209 	return 0;
   14210 }
   14211 #endif /* WM_WOL */
   14212 
   14213 static int
   14214 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14215 {
   14216 	uint32_t manc, fwsm, factps;
   14217 
   14218 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14219 		return 0;
   14220 
   14221 	manc = CSR_READ(sc, WMREG_MANC);
   14222 
   14223 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14224 		device_xname(sc->sc_dev), manc));
   14225 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14226 		return 0;
   14227 
   14228 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14229 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14230 		factps = CSR_READ(sc, WMREG_FACTPS);
   14231 		if (((factps & FACTPS_MNGCG) == 0)
   14232 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14233 			return 1;
   14234 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14235 		uint16_t data;
   14236 
   14237 		factps = CSR_READ(sc, WMREG_FACTPS);
   14238 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14239 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14240 			device_xname(sc->sc_dev), factps, data));
   14241 		if (((factps & FACTPS_MNGCG) == 0)
   14242 		    && ((data & NVM_CFG2_MNGM_MASK)
   14243 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14244 			return 1;
   14245 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14246 	    && ((manc & MANC_ASF_EN) == 0))
   14247 		return 1;
   14248 
   14249 	return 0;
   14250 }
   14251 
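/*
 * Annotation: summarizing the three ways wm_enable_mng_pass_thru()
 * above can report that management pass-through is enabled, all of
 * which first require MANC_RECV_TCO_EN:
 *
 *	1. ARC subsystem valid: FACTPS_MNGCG clear and the FWSM mode
 *	   field equals MNG_ICH_IAMT_MODE
 *	2. 82574/82583: FACTPS_MNGCG clear and the NVM CFG2 management
 *	   mode field equals NVM_CFG2_MNGM_PT (pass-through)
 *	3. Otherwise: MANC_SMBUS_EN set and MANC_ASF_EN clear
 */
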
   14252 static bool
   14253 wm_phy_resetisblocked(struct wm_softc *sc)
   14254 {
   14255 	bool blocked = false;
   14256 	uint32_t reg;
   14257 	int i = 0;
   14258 
   14259 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14260 		device_xname(sc->sc_dev), __func__));
   14261 
   14262 	switch (sc->sc_type) {
   14263 	case WM_T_ICH8:
   14264 	case WM_T_ICH9:
   14265 	case WM_T_ICH10:
   14266 	case WM_T_PCH:
   14267 	case WM_T_PCH2:
   14268 	case WM_T_PCH_LPT:
   14269 	case WM_T_PCH_SPT:
   14270 	case WM_T_PCH_CNP:
   14271 		do {
   14272 			reg = CSR_READ(sc, WMREG_FWSM);
   14273 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14274 				blocked = true;
   14275 				delay(10*1000);
   14276 				continue;
   14277 			}
   14278 			blocked = false;
   14279 		} while (blocked && (i++ < 30));
   14280 		return blocked;
   14281 		break;
   14282 	case WM_T_82571:
   14283 	case WM_T_82572:
   14284 	case WM_T_82573:
   14285 	case WM_T_82574:
   14286 	case WM_T_82583:
   14287 	case WM_T_80003:
   14288 		reg = CSR_READ(sc, WMREG_MANC);
   14289 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14290 			return true;
   14291 		else
   14292 			return false;
   14293 		break;
   14294 	default:
   14295 		/* no problem */
   14296 		break;
   14297 	}
   14298 
   14299 	return false;
   14300 }
   14301 
   14302 static void
   14303 wm_get_hw_control(struct wm_softc *sc)
   14304 {
   14305 	uint32_t reg;
   14306 
   14307 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14308 		device_xname(sc->sc_dev), __func__));
   14309 
   14310 	if (sc->sc_type == WM_T_82573) {
   14311 		reg = CSR_READ(sc, WMREG_SWSM);
   14312 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14313 	} else if (sc->sc_type >= WM_T_82571) {
   14314 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14315 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14316 	}
   14317 }
   14318 
   14319 static void
   14320 wm_release_hw_control(struct wm_softc *sc)
   14321 {
   14322 	uint32_t reg;
   14323 
   14324 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14325 		device_xname(sc->sc_dev), __func__));
   14326 
   14327 	if (sc->sc_type == WM_T_82573) {
   14328 		reg = CSR_READ(sc, WMREG_SWSM);
   14329 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14330 	} else if (sc->sc_type >= WM_T_82571) {
   14331 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14332 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14333 	}
   14334 }
   14335 
   14336 static void
   14337 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14338 {
   14339 	uint32_t reg;
   14340 
   14341 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14342 		device_xname(sc->sc_dev), __func__));
   14343 
   14344 	if (sc->sc_type < WM_T_PCH2)
   14345 		return;
   14346 
   14347 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14348 
   14349 	if (gate)
   14350 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14351 	else
   14352 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14353 
   14354 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14355 }
   14356 
   14357 static int
   14358 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14359 {
   14360 	uint32_t fwsm, reg;
   14361 	int rv = 0;
   14362 
   14363 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14364 		device_xname(sc->sc_dev), __func__));
   14365 
   14366 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14367 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14368 
   14369 	/* Disable ULP */
   14370 	wm_ulp_disable(sc);
   14371 
   14372 	/* Acquire PHY semaphore */
   14373 	rv = sc->phy.acquire(sc);
   14374 	if (rv != 0) {
   14375 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14376 		device_xname(sc->sc_dev), __func__));
   14377 		return -1;
   14378 	}
   14379 
   14380 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14381 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14382 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14383 	 */
   14384 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14385 	switch (sc->sc_type) {
   14386 	case WM_T_PCH_LPT:
   14387 	case WM_T_PCH_SPT:
   14388 	case WM_T_PCH_CNP:
   14389 		if (wm_phy_is_accessible_pchlan(sc))
   14390 			break;
   14391 
   14392 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14393 		 * forcing MAC to SMBus mode first.
   14394 		 */
   14395 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14396 		reg |= CTRL_EXT_FORCE_SMBUS;
   14397 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14398 #if 0
   14399 		/* XXX Isn't this required??? */
   14400 		CSR_WRITE_FLUSH(sc);
   14401 #endif
   14402 		/* Wait 50 milliseconds for MAC to finish any retries
   14403 		 * that it might be trying to perform from previous
   14404 		 * attempts to acknowledge any phy read requests.
   14405 		 */
   14406 		delay(50 * 1000);
   14407 		/* FALLTHROUGH */
   14408 	case WM_T_PCH2:
   14409 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14410 			break;
   14411 		/* FALLTHROUGH */
   14412 	case WM_T_PCH:
   14413 		if (sc->sc_type == WM_T_PCH)
   14414 			if ((fwsm & FWSM_FW_VALID) != 0)
   14415 				break;
   14416 
   14417 		if (wm_phy_resetisblocked(sc) == true) {
   14418 			printf("XXX reset is blocked(3)\n");
   14419 			break;
   14420 		}
   14421 
   14422 		/* Toggle LANPHYPC Value bit */
   14423 		wm_toggle_lanphypc_pch_lpt(sc);
   14424 
   14425 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14426 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14427 				break;
   14428 
   14429 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14430 			 * so ensure that the MAC is also out of SMBus mode
   14431 			 */
   14432 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14433 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14434 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14435 
   14436 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14437 				break;
   14438 			rv = -1;
   14439 		}
   14440 		break;
   14441 	default:
   14442 		break;
   14443 	}
   14444 
   14445 	/* Release semaphore */
   14446 	sc->phy.release(sc);
   14447 
   14448 	if (rv == 0) {
   14449 		/* Check to see if able to reset PHY.  Print error if not */
   14450 		if (wm_phy_resetisblocked(sc)) {
   14451 			printf("XXX reset is blocked(4)\n");
   14452 			goto out;
   14453 		}
   14454 
    14455 		/* Reset the PHY before any access to it.  Doing so ensures
   14456 		 * that the PHY is in a known good state before we read/write
   14457 		 * PHY registers.  The generic reset is sufficient here,
   14458 		 * because we haven't determined the PHY type yet.
   14459 		 */
   14460 		if (wm_reset_phy(sc) != 0)
   14461 			goto out;
   14462 
   14463 		/* On a successful reset, possibly need to wait for the PHY
   14464 		 * to quiesce to an accessible state before returning control
   14465 		 * to the calling function.  If the PHY does not quiesce, then
    14466 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    14467 		 * the PHY is in.
   14468 		 */
   14469 		if (wm_phy_resetisblocked(sc))
    14470 			printf("XXX reset is blocked(5)\n");
   14471 	}
   14472 
   14473 out:
   14474 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14475 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14476 		delay(10*1000);
   14477 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14478 	}
   14479 
   14480 	return 0;
   14481 }
   14482 
   14483 static void
   14484 wm_init_manageability(struct wm_softc *sc)
   14485 {
   14486 
   14487 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14488 		device_xname(sc->sc_dev), __func__));
   14489 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14490 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14491 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14492 
   14493 		/* Disable hardware interception of ARP */
   14494 		manc &= ~MANC_ARP_EN;
   14495 
   14496 		/* Enable receiving management packets to the host */
   14497 		if (sc->sc_type >= WM_T_82571) {
   14498 			manc |= MANC_EN_MNG2HOST;
   14499 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14500 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14501 		}
   14502 
   14503 		CSR_WRITE(sc, WMREG_MANC, manc);
   14504 	}
   14505 }
   14506 
   14507 static void
   14508 wm_release_manageability(struct wm_softc *sc)
   14509 {
   14510 
   14511 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14512 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14513 
   14514 		manc |= MANC_ARP_EN;
   14515 		if (sc->sc_type >= WM_T_82571)
   14516 			manc &= ~MANC_EN_MNG2HOST;
   14517 
   14518 		CSR_WRITE(sc, WMREG_MANC, manc);
   14519 	}
   14520 }
   14521 
   14522 static void
   14523 wm_get_wakeup(struct wm_softc *sc)
   14524 {
   14525 
   14526 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14527 	switch (sc->sc_type) {
   14528 	case WM_T_82573:
   14529 	case WM_T_82583:
   14530 		sc->sc_flags |= WM_F_HAS_AMT;
   14531 		/* FALLTHROUGH */
   14532 	case WM_T_80003:
   14533 	case WM_T_82575:
   14534 	case WM_T_82576:
   14535 	case WM_T_82580:
   14536 	case WM_T_I350:
   14537 	case WM_T_I354:
   14538 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14539 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14540 		/* FALLTHROUGH */
   14541 	case WM_T_82541:
   14542 	case WM_T_82541_2:
   14543 	case WM_T_82547:
   14544 	case WM_T_82547_2:
   14545 	case WM_T_82571:
   14546 	case WM_T_82572:
   14547 	case WM_T_82574:
   14548 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14549 		break;
   14550 	case WM_T_ICH8:
   14551 	case WM_T_ICH9:
   14552 	case WM_T_ICH10:
   14553 	case WM_T_PCH:
   14554 	case WM_T_PCH2:
   14555 	case WM_T_PCH_LPT:
   14556 	case WM_T_PCH_SPT:
   14557 	case WM_T_PCH_CNP:
   14558 		sc->sc_flags |= WM_F_HAS_AMT;
   14559 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14560 		break;
   14561 	default:
   14562 		break;
   14563 	}
   14564 
   14565 	/* 1: HAS_MANAGE */
   14566 	if (wm_enable_mng_pass_thru(sc) != 0)
   14567 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14568 
   14569 	/*
    14570 	 * Note that the WOL flags are set after the resetting of the
    14571 	 * EEPROM stuff.
   14572 	 */
   14573 }
   14574 
   14575 /*
   14576  * Unconfigure Ultra Low Power mode.
   14577  * Only for I217 and newer (see below).
   14578  */
   14579 static int
   14580 wm_ulp_disable(struct wm_softc *sc)
   14581 {
   14582 	uint32_t reg;
   14583 	uint16_t phyreg;
   14584 	int i = 0, rv = 0;
   14585 
   14586 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14587 		device_xname(sc->sc_dev), __func__));
   14588 	/* Exclude old devices */
   14589 	if ((sc->sc_type < WM_T_PCH_LPT)
   14590 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14591 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14592 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14593 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14594 		return 0;
   14595 
   14596 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14597 		/* Request ME un-configure ULP mode in the PHY */
   14598 		reg = CSR_READ(sc, WMREG_H2ME);
   14599 		reg &= ~H2ME_ULP;
   14600 		reg |= H2ME_ENFORCE_SETTINGS;
   14601 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14602 
   14603 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14604 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14605 			if (i++ == 30) {
   14606 				printf("%s timed out\n", __func__);
   14607 				return -1;
   14608 			}
   14609 			delay(10 * 1000);
   14610 		}
   14611 		reg = CSR_READ(sc, WMREG_H2ME);
   14612 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14613 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14614 
   14615 		return 0;
   14616 	}
   14617 
   14618 	/* Acquire semaphore */
   14619 	rv = sc->phy.acquire(sc);
   14620 	if (rv != 0) {
   14621 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14622 		device_xname(sc->sc_dev), __func__));
   14623 		return -1;
   14624 	}
   14625 
   14626 	/* Toggle LANPHYPC */
   14627 	wm_toggle_lanphypc_pch_lpt(sc);
   14628 
   14629 	/* Unforce SMBus mode in PHY */
   14630 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14631 	if (rv != 0) {
   14632 		uint32_t reg2;
   14633 
   14634 		printf("%s: Force SMBus first.\n", __func__);
   14635 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14636 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14637 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14638 		delay(50 * 1000);
   14639 
   14640 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14641 		    &phyreg);
   14642 		if (rv != 0)
   14643 			goto release;
   14644 	}
   14645 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14646 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14647 
   14648 	/* Unforce SMBus mode in MAC */
   14649 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14650 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14651 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14652 
   14653 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14654 	if (rv != 0)
   14655 		goto release;
   14656 	phyreg |= HV_PM_CTRL_K1_ENA;
   14657 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14658 
   14659 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14660 		&phyreg);
   14661 	if (rv != 0)
   14662 		goto release;
   14663 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14664 	    | I218_ULP_CONFIG1_STICKY_ULP
   14665 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14666 	    | I218_ULP_CONFIG1_WOL_HOST
   14667 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14668 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14669 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14670 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14671 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14672 	phyreg |= I218_ULP_CONFIG1_START;
   14673 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14674 
   14675 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14676 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14677 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14678 
   14679 release:
   14680 	/* Release semaphore */
   14681 	sc->phy.release(sc);
   14682 	wm_gmii_reset(sc);
   14683 	delay(50 * 1000);
   14684 
   14685 	return rv;
   14686 }
   14687 
   14688 /* WOL in the newer chipset interfaces (pchlan) */
   14689 static int
   14690 wm_enable_phy_wakeup(struct wm_softc *sc)
   14691 {
   14692 	device_t dev = sc->sc_dev;
   14693 	uint32_t mreg, moff;
   14694 	uint16_t wuce, wuc, wufc, preg;
   14695 	int i, rv;
   14696 
   14697 	KASSERT(sc->sc_type >= WM_T_PCH);
   14698 
   14699 	/* Copy MAC RARs to PHY RARs */
   14700 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14701 
   14702 	/* Activate PHY wakeup */
   14703 	rv = sc->phy.acquire(sc);
   14704 	if (rv != 0) {
   14705 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14706 		    __func__);
   14707 		return rv;
   14708 	}
   14709 
   14710 	/*
   14711 	 * Enable access to PHY wakeup registers.
   14712 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14713 	 */
   14714 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14715 	if (rv != 0) {
   14716 		device_printf(dev,
   14717 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14718 		goto release;
   14719 	}
   14720 
   14721 	/* Copy MAC MTA to PHY MTA */
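          	/*
          	 * Note (added): each 32-bit MTA word in the MAC is mirrored
          	 * as two 16-bit BM PHY wakeup registers, so the loop below
          	 * writes the low half to BM_MTA(i) and the high half to the
          	 * next wakeup-register address.
          	 */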
   14722 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14723 		uint16_t lo, hi;
   14724 
   14725 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14726 		lo = (uint16_t)(mreg & 0xffff);
   14727 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14728 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14729 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14730 	}
   14731 
   14732 	/* Configure PHY Rx Control register */
   14733 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14734 	mreg = CSR_READ(sc, WMREG_RCTL);
   14735 	if (mreg & RCTL_UPE)
   14736 		preg |= BM_RCTL_UPE;
   14737 	if (mreg & RCTL_MPE)
   14738 		preg |= BM_RCTL_MPE;
   14739 	preg &= ~(BM_RCTL_MO_MASK);
   14740 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14741 	if (moff != 0)
   14742 		preg |= moff << BM_RCTL_MO_SHIFT;
   14743 	if (mreg & RCTL_BAM)
   14744 		preg |= BM_RCTL_BAM;
   14745 	if (mreg & RCTL_PMCF)
   14746 		preg |= BM_RCTL_PMCF;
   14747 	mreg = CSR_READ(sc, WMREG_CTRL);
   14748 	if (mreg & CTRL_RFCE)
   14749 		preg |= BM_RCTL_RFCE;
   14750 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14751 
   14752 	wuc = WUC_APME | WUC_PME_EN;
   14753 	wufc = WUFC_MAG;
   14754 	/* Enable PHY wakeup in MAC register */
   14755 	CSR_WRITE(sc, WMREG_WUC,
   14756 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14757 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14758 
   14759 	/* Configure and enable PHY wakeup in PHY registers */
   14760 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14761 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14762 
   14763 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14764 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14765 
   14766 release:
   14767 	sc->phy.release(sc);
   14768 
    14769 	return rv;
   14770 }
   14771 
   14772 /* Power down workaround on D3 */
   14773 static void
   14774 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14775 {
   14776 	uint32_t reg;
   14777 	uint16_t phyreg;
   14778 	int i;
   14779 
   14780 	for (i = 0; i < 2; i++) {
   14781 		/* Disable link */
   14782 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14783 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14784 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14785 
   14786 		/*
   14787 		 * Call gig speed drop workaround on Gig disable before
   14788 		 * accessing any PHY registers
   14789 		 */
   14790 		if (sc->sc_type == WM_T_ICH8)
   14791 			wm_gig_downshift_workaround_ich8lan(sc);
   14792 
   14793 		/* Write VR power-down enable */
   14794 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14795 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14796 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14797 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14798 
   14799 		/* Read it back and test */
   14800 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14801 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14802 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14803 			break;
   14804 
   14805 		/* Issue PHY reset and repeat at most one more time */
   14806 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14807 	}
   14808 }
   14809 
   14810 /*
   14811  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14812  *  @sc: pointer to the HW structure
   14813  *
   14814  *  During S0 to Sx transition, it is possible the link remains at gig
   14815  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14816  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14817  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14818  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14819  *  needs to be written.
    14820  *  Parts that support (and are linked to a partner that supports) EEE in
   14821  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14822  *  than 10Mbps w/o EEE.
   14823  */
   14824 static void
   14825 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14826 {
   14827 	device_t dev = sc->sc_dev;
   14828 	struct ethercom *ec = &sc->sc_ethercom;
   14829 	uint32_t phy_ctrl;
   14830 	int rv;
   14831 
   14832 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14833 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14834 
   14835 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14836 
   14837 	if (sc->sc_phytype == WMPHY_I217) {
   14838 		uint16_t devid = sc->sc_pcidevid;
   14839 
   14840 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14841 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14842 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14843 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14844 		    (sc->sc_type >= WM_T_PCH_SPT))
   14845 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14846 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14847 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14848 
   14849 		if (sc->phy.acquire(sc) != 0)
   14850 			goto out;
   14851 
   14852 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14853 			uint16_t eee_advert;
   14854 
   14855 			rv = wm_read_emi_reg_locked(dev,
   14856 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14857 			if (rv)
   14858 				goto release;
   14859 
   14860 			/*
   14861 			 * Disable LPLU if both link partners support 100BaseT
   14862 			 * EEE and 100Full is advertised on both ends of the
   14863 			 * link, and enable Auto Enable LPI since there will
   14864 			 * be no driver to enable LPI while in Sx.
   14865 			 */
   14866 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14867 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14868 				uint16_t anar, phy_reg;
   14869 
   14870 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14871 				    &anar);
   14872 				if (anar & ANAR_TX_FD) {
   14873 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14874 					    PHY_CTRL_NOND0A_LPLU);
   14875 
   14876 					/* Set Auto Enable LPI after link up */
   14877 					sc->phy.readreg_locked(dev, 2,
   14878 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14879 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14880 					sc->phy.writereg_locked(dev, 2,
   14881 					    I217_LPI_GPIO_CTRL, phy_reg);
   14882 				}
   14883 			}
   14884 		}
   14885 
   14886 		/*
   14887 		 * For i217 Intel Rapid Start Technology support,
   14888 		 * when the system is going into Sx and no manageability engine
   14889 		 * is present, the driver must configure proxy to reset only on
   14890 		 * power good.	LPI (Low Power Idle) state must also reset only
   14891 		 * on power good, as well as the MTA (Multicast table array).
   14892 		 * The SMBus release must also be disabled on LCD reset.
   14893 		 */
   14894 
   14895 		/*
   14896 		 * Enable MTA to reset for Intel Rapid Start Technology
   14897 		 * Support
   14898 		 */
   14899 
   14900 release:
   14901 		sc->phy.release(sc);
   14902 	}
   14903 out:
   14904 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14905 
   14906 	if (sc->sc_type == WM_T_ICH8)
   14907 		wm_gig_downshift_workaround_ich8lan(sc);
   14908 
   14909 	if (sc->sc_type >= WM_T_PCH) {
   14910 		wm_oem_bits_config_ich8lan(sc, false);
   14911 
   14912 		/* Reset PHY to activate OEM bits on 82577/8 */
   14913 		if (sc->sc_type == WM_T_PCH)
   14914 			wm_reset_phy(sc);
   14915 
   14916 		if (sc->phy.acquire(sc) != 0)
   14917 			return;
   14918 		wm_write_smbus_addr(sc);
   14919 		sc->phy.release(sc);
   14920 	}
   14921 }
   14922 
   14923 /*
   14924  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14925  *  @sc: pointer to the HW structure
   14926  *
   14927  *  During Sx to S0 transitions on non-managed devices or managed devices
   14928  *  on which PHY resets are not blocked, if the PHY registers cannot be
    14929  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   14930  *  the PHY.
   14931  *  On i217, setup Intel Rapid Start Technology.
   14932  */
   14933 static int
   14934 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14935 {
   14936 	device_t dev = sc->sc_dev;
   14937 	int rv;
   14938 
   14939 	if (sc->sc_type < WM_T_PCH2)
   14940 		return 0;
   14941 
   14942 	rv = wm_init_phy_workarounds_pchlan(sc);
   14943 	if (rv != 0)
   14944 		return -1;
   14945 
    14946 	/* For i217 Intel Rapid Start Technology support: when the system
    14947 	 * is transitioning from Sx and no manageability engine is present,
    14948 	 * configure SMBus to restore on reset, disable proxy, and enable
   14949 	 * the reset on MTA (Multicast table array).
   14950 	 */
   14951 	if (sc->sc_phytype == WMPHY_I217) {
   14952 		uint16_t phy_reg;
   14953 
   14954 		if (sc->phy.acquire(sc) != 0)
   14955 			return -1;
   14956 
   14957 		/* Clear Auto Enable LPI after link up */
   14958 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14959 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14960 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14961 
   14962 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14963 			/* Restore clear on SMB if no manageability engine
   14964 			 * is present
   14965 			 */
   14966 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   14967 			    &phy_reg);
   14968 			if (rv != 0)
   14969 				goto release;
   14970 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14971 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14972 
   14973 			/* Disable Proxy */
   14974 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14975 		}
   14976 		/* Enable reset on MTA */
    14977 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14978 		if (rv != 0)
   14979 			goto release;
   14980 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14981 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14982 
   14983 release:
   14984 		sc->phy.release(sc);
   14985 		return rv;
   14986 	}
   14987 
   14988 	return 0;
   14989 }
   14990 
   14991 static void
   14992 wm_enable_wakeup(struct wm_softc *sc)
   14993 {
   14994 	uint32_t reg, pmreg;
   14995 	pcireg_t pmode;
   14996 	int rv = 0;
   14997 
   14998 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14999 		device_xname(sc->sc_dev), __func__));
   15000 
   15001 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15002 	    &pmreg, NULL) == 0)
   15003 		return;
   15004 
   15005 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15006 		goto pme;
   15007 
   15008 	/* Advertise the wakeup capability */
   15009 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15010 	    | CTRL_SWDPIN(3));
   15011 
   15012 	/* Keep the laser running on fiber adapters */
   15013 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15014 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15015 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15016 		reg |= CTRL_EXT_SWDPIN(3);
   15017 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15018 	}
   15019 
   15020 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15021 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15022 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15023 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15024 		wm_suspend_workarounds_ich8lan(sc);
   15025 
   15026 #if 0	/* for the multicast packet */
   15027 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    15028 	reg |= WUFC_MC;
          	CSR_WRITE(sc, WMREG_WUFC, reg); /* added: reg was computed but never written */
    15029 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15030 #endif
   15031 
   15032 	if (sc->sc_type >= WM_T_PCH) {
   15033 		rv = wm_enable_phy_wakeup(sc);
   15034 		if (rv != 0)
   15035 			goto pme;
   15036 	} else {
   15037 		/* Enable wakeup by the MAC */
   15038 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15039 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15040 	}
   15041 
   15042 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15043 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15044 		|| (sc->sc_type == WM_T_PCH2))
   15045 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15046 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15047 
   15048 pme:
   15049 	/* Request PME */
   15050 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15051 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15052 		/* For WOL */
   15053 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15054 	} else {
   15055 		/* Disable WOL */
   15056 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15057 	}
   15058 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15059 }
   15060 
   15061 /* Disable ASPM L0s and/or L1 for workaround */
   15062 static void
   15063 wm_disable_aspm(struct wm_softc *sc)
   15064 {
   15065 	pcireg_t reg, mask = 0;
    15066 	const char *str = "";
   15067 
   15068 	/*
    15069 	 * Only for PCIe devices that have the PCIe capability in the
    15070 	 * PCI config space.
   15071 	 */
   15072 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15073 		return;
   15074 
   15075 	switch (sc->sc_type) {
   15076 	case WM_T_82571:
   15077 	case WM_T_82572:
   15078 		/*
   15079 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15080 		 * State Power management L1 State (ASPM L1).
   15081 		 */
   15082 		mask = PCIE_LCSR_ASPM_L1;
   15083 		str = "L1 is";
   15084 		break;
   15085 	case WM_T_82573:
   15086 	case WM_T_82574:
   15087 	case WM_T_82583:
   15088 		/*
   15089 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15090 		 *
    15091 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15092 		 * some chipsets.  The documentation for the 82574 and 82583
    15093 		 * says that disabling L0s only on those specific chipsets is
    15094 		 * sufficient, but we follow what the Intel em driver does.
   15095 		 *
   15096 		 * References:
   15097 		 * Errata 8 of the Specification Update of i82573.
   15098 		 * Errata 20 of the Specification Update of i82574.
   15099 		 * Errata 9 of the Specification Update of i82583.
   15100 		 */
   15101 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15102 		str = "L0s and L1 are";
   15103 		break;
   15104 	default:
   15105 		return;
   15106 	}
   15107 
   15108 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15109 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15110 	reg &= ~mask;
   15111 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15112 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15113 
   15114 	/* Print only in wm_attach() */
   15115 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15116 		aprint_verbose_dev(sc->sc_dev,
   15117 		    "ASPM %s disabled to workaround the errata.\n", str);
   15118 }
   15119 
   15120 /* LPLU */
   15121 
   15122 static void
   15123 wm_lplu_d0_disable(struct wm_softc *sc)
   15124 {
   15125 	struct mii_data *mii = &sc->sc_mii;
   15126 	uint32_t reg;
   15127 	uint16_t phyval;
   15128 
   15129 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15130 		device_xname(sc->sc_dev), __func__));
   15131 
   15132 	if (sc->sc_phytype == WMPHY_IFE)
   15133 		return;
   15134 
   15135 	switch (sc->sc_type) {
   15136 	case WM_T_82571:
   15137 	case WM_T_82572:
   15138 	case WM_T_82573:
   15139 	case WM_T_82575:
   15140 	case WM_T_82576:
   15141 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15142 		phyval &= ~PMR_D0_LPLU;
   15143 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15144 		break;
   15145 	case WM_T_82580:
   15146 	case WM_T_I350:
   15147 	case WM_T_I210:
   15148 	case WM_T_I211:
   15149 		reg = CSR_READ(sc, WMREG_PHPM);
   15150 		reg &= ~PHPM_D0A_LPLU;
   15151 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15152 		break;
   15153 	case WM_T_82574:
   15154 	case WM_T_82583:
   15155 	case WM_T_ICH8:
   15156 	case WM_T_ICH9:
   15157 	case WM_T_ICH10:
   15158 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15159 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15160 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15161 		CSR_WRITE_FLUSH(sc);
   15162 		break;
   15163 	case WM_T_PCH:
   15164 	case WM_T_PCH2:
   15165 	case WM_T_PCH_LPT:
   15166 	case WM_T_PCH_SPT:
   15167 	case WM_T_PCH_CNP:
   15168 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15169 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15170 		if (wm_phy_resetisblocked(sc) == false)
   15171 			phyval |= HV_OEM_BITS_ANEGNOW;
   15172 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15173 		break;
   15174 	default:
   15175 		break;
   15176 	}
   15177 }
   15178 
   15179 /* EEE */
   15180 
   15181 static int
   15182 wm_set_eee_i350(struct wm_softc *sc)
   15183 {
   15184 	struct ethercom *ec = &sc->sc_ethercom;
   15185 	uint32_t ipcnfg, eeer;
   15186 	uint32_t ipcnfg_mask
   15187 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15188 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15189 
   15190 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15191 
   15192 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15193 	eeer = CSR_READ(sc, WMREG_EEER);
   15194 
    15195 	/* Enable or disable per user setting */
   15196 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15197 		ipcnfg |= ipcnfg_mask;
   15198 		eeer |= eeer_mask;
   15199 	} else {
   15200 		ipcnfg &= ~ipcnfg_mask;
   15201 		eeer &= ~eeer_mask;
   15202 	}
   15203 
   15204 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15205 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15206 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15207 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15208 
   15209 	return 0;
   15210 }
   15211 
   15212 static int
   15213 wm_set_eee_pchlan(struct wm_softc *sc)
   15214 {
   15215 	device_t dev = sc->sc_dev;
   15216 	struct ethercom *ec = &sc->sc_ethercom;
   15217 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15218 	int rv = 0;
   15219 
   15220 	switch (sc->sc_phytype) {
   15221 	case WMPHY_82579:
   15222 		lpa = I82579_EEE_LP_ABILITY;
   15223 		pcs_status = I82579_EEE_PCS_STATUS;
   15224 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15225 		break;
   15226 	case WMPHY_I217:
   15227 		lpa = I217_EEE_LP_ABILITY;
   15228 		pcs_status = I217_EEE_PCS_STATUS;
   15229 		adv_addr = I217_EEE_ADVERTISEMENT;
   15230 		break;
   15231 	default:
   15232 		return 0;
   15233 	}
   15234 
   15235 	if (sc->phy.acquire(sc)) {
   15236 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15237 		return 0;
   15238 	}
   15239 
   15240 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15241 	if (rv != 0)
   15242 		goto release;
   15243 
   15244 	/* Clear bits that enable EEE in various speeds */
   15245 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15246 
   15247 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15248 		/* Save off link partner's EEE ability */
   15249 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15250 		if (rv != 0)
   15251 			goto release;
   15252 
   15253 		/* Read EEE advertisement */
   15254 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15255 			goto release;
   15256 
   15257 		/*
   15258 		 * Enable EEE only for speeds in which the link partner is
   15259 		 * EEE capable and for which we advertise EEE.
   15260 		 */
   15261 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15262 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15263 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15264 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15265 			if ((data & ANLPAR_TX_FD) != 0)
   15266 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15267 			else {
   15268 				/*
   15269 				 * EEE is not supported in 100Half, so ignore
   15270 				 * partner's EEE in 100 ability if full-duplex
   15271 				 * is not advertised.
   15272 				 */
   15273 				sc->eee_lp_ability
   15274 				    &= ~AN_EEEADVERT_100_TX;
   15275 			}
   15276 		}
   15277 	}
   15278 
   15279 	if (sc->sc_phytype == WMPHY_82579) {
   15280 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15281 		if (rv != 0)
   15282 			goto release;
   15283 
   15284 		data &= ~I82579_LPI_PLL_SHUT_100;
   15285 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15286 	}
   15287 
   15288 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15289 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15290 		goto release;
   15291 
   15292 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15293 release:
   15294 	sc->phy.release(sc);
   15295 
   15296 	return rv;
   15297 }
   15298 
   15299 static int
   15300 wm_set_eee(struct wm_softc *sc)
   15301 {
   15302 	struct ethercom *ec = &sc->sc_ethercom;
   15303 
   15304 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15305 		return 0;
   15306 
   15307 	if (sc->sc_type == WM_T_I354) {
   15308 		/* I354 uses an external PHY */
   15309 		return 0; /* not yet */
   15310 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15311 		return wm_set_eee_i350(sc);
   15312 	else if (sc->sc_type >= WM_T_PCH2)
   15313 		return wm_set_eee_pchlan(sc);
   15314 
   15315 	return 0;
   15316 }
   15317 
   15318 /*
   15319  * Workarounds (mainly PHY related).
   15320  * Basically, PHY's workarounds are in the PHY drivers.
   15321  */
   15322 
   15323 /* Work-around for 82566 Kumeran PCS lock loss */
   15324 static int
   15325 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15326 {
   15327 	struct mii_data *mii = &sc->sc_mii;
   15328 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15329 	int i, reg, rv;
   15330 	uint16_t phyreg;
   15331 
   15332 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15333 		device_xname(sc->sc_dev), __func__));
   15334 
   15335 	/* If the link is not up, do nothing */
   15336 	if ((status & STATUS_LU) == 0)
   15337 		return 0;
   15338 
    15339 	/* Nothing to do unless the link is at 1Gbps */
   15340 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15341 		return 0;
   15342 
   15343 	for (i = 0; i < 10; i++) {
   15344 		/* read twice */
   15345 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15346 		if (rv != 0)
   15347 			return rv;
   15348 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15349 		if (rv != 0)
   15350 			return rv;
   15351 
   15352 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15353 			goto out;	/* GOOD! */
   15354 
   15355 		/* Reset the PHY */
   15356 		wm_reset_phy(sc);
   15357 		delay(5*1000);
   15358 	}
   15359 
   15360 	/* Disable GigE link negotiation */
   15361 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15362 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15363 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15364 
   15365 	/*
   15366 	 * Call gig speed drop workaround on Gig disable before accessing
   15367 	 * any PHY registers.
   15368 	 */
   15369 	wm_gig_downshift_workaround_ich8lan(sc);
   15370 
   15371 out:
   15372 	return 0;
   15373 }
   15374 
   15375 /*
   15376  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15377  *  @sc: pointer to the HW structure
   15378  *
    15379  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15380  *  LPLU, Gig disable, MDIC PHY reset):
   15381  *    1) Set Kumeran Near-end loopback
   15382  *    2) Clear Kumeran Near-end loopback
   15383  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15384  */
   15385 static void
   15386 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15387 {
   15388 	uint16_t kmreg;
   15389 
   15390 	/* Only for igp3 */
   15391 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15392 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15393 			return;
   15394 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15395 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15396 			return;
   15397 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15398 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15399 	}
   15400 }
   15401 
   15402 /*
   15403  * Workaround for pch's PHYs
   15404  * XXX should be moved to new PHY driver?
   15405  */
   15406 static int
   15407 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15408 {
   15409 	device_t dev = sc->sc_dev;
   15410 	struct mii_data *mii = &sc->sc_mii;
   15411 	struct mii_softc *child;
   15412 	uint16_t phy_data, phyrev = 0;
   15413 	int phytype = sc->sc_phytype;
   15414 	int rv;
   15415 
   15416 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15417 		device_xname(dev), __func__));
   15418 	KASSERT(sc->sc_type == WM_T_PCH);
   15419 
   15420 	/* Set MDIO slow mode before any other MDIO access */
   15421 	if (phytype == WMPHY_82577)
   15422 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15423 			return rv;
   15424 
   15425 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15426 	if (child != NULL)
   15427 		phyrev = child->mii_mpd_rev;
   15428 
    15429 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15430 	if ((child != NULL) &&
   15431 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15432 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15433 		/* Disable generation of early preamble (0x4431) */
   15434 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15435 		    &phy_data);
   15436 		if (rv != 0)
   15437 			return rv;
   15438 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15439 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15440 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15441 		    phy_data);
   15442 		if (rv != 0)
   15443 			return rv;
   15444 
   15445 		/* Preamble tuning for SSC */
   15446 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15447 		if (rv != 0)
   15448 			return rv;
   15449 	}
   15450 
   15451 	/* 82578 */
   15452 	if (phytype == WMPHY_82578) {
   15453 		/*
   15454 		 * Return registers to default by doing a soft reset then
   15455 		 * writing 0x3140 to the control register
   15456 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15457 		 */
   15458 		if ((child != NULL) && (phyrev < 2)) {
   15459 			PHY_RESET(child);
   15460 			rv = sc->sc_mii.mii_writereg(dev, 2, MII_BMCR,
   15461 			    0x3140);
   15462 			if (rv != 0)
   15463 				return rv;
   15464 		}
   15465 	}
   15466 
   15467 	/* Select page 0 */
   15468 	if ((rv = sc->phy.acquire(sc)) != 0)
   15469 		return rv;
   15470 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15471 	sc->phy.release(sc);
   15472 	if (rv != 0)
   15473 		return rv;
   15474 
   15475 	/*
   15476 	 * Configure the K1 Si workaround during phy reset assuming there is
   15477 	 * link so that it disables K1 if link is in 1Gbps.
   15478 	 */
   15479 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15480 		return rv;
   15481 
   15482 	/* Workaround for link disconnects on a busy hub in half duplex */
   15483 	rv = sc->phy.acquire(sc);
   15484 	if (rv)
   15485 		return rv;
   15486 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15487 	if (rv)
   15488 		goto release;
   15489 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15490 	    phy_data & 0x00ff);
   15491 	if (rv)
   15492 		goto release;
   15493 
    15494 	/* Set MSE higher so the link stays up when noise is high */
   15495 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15496 release:
   15497 	sc->phy.release(sc);
   15498 
   15499 	return rv;
    15502 }
   15503 
   15504 /*
   15505  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15506  *  @sc:   pointer to the HW structure
   15507  */
   15508 static void
   15509 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15510 {
   15511 	device_t dev = sc->sc_dev;
   15512 	uint32_t mac_reg;
   15513 	uint16_t i, wuce;
   15514 	int count;
   15515 
   15516 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15517 		device_xname(sc->sc_dev), __func__));
   15518 
   15519 	if (sc->phy.acquire(sc) != 0)
   15520 		return;
   15521 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15522 		goto release;
   15523 
   15524 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
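          	/*
          	 * Note (added): each receive-address entry is a 32-bit RAL
          	 * (MAC address bytes 0-3) plus a RAH holding bytes 4-5 and
          	 * the Address Valid bit.  The loop below splits every 32-bit
          	 * MAC register into two 16-bit PHY writes; of RAH's upper
          	 * half, only the RAL_AV bit is carried over.
          	 */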
   15525 	count = wm_rar_count(sc);
   15526 	for (i = 0; i < count; i++) {
   15527 		uint16_t lo, hi;
   15528 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15529 		lo = (uint16_t)(mac_reg & 0xffff);
   15530 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15531 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15532 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15533 
   15534 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15535 		lo = (uint16_t)(mac_reg & 0xffff);
   15536 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15537 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15538 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15539 	}
   15540 
   15541 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15542 
   15543 release:
   15544 	sc->phy.release(sc);
   15545 }
   15546 
   15547 /*
   15548  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15549  *  done after every PHY reset.
   15550  */
   15551 static int
   15552 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15553 {
   15554 	device_t dev = sc->sc_dev;
   15555 	int rv;
   15556 
   15557 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15558 		device_xname(dev), __func__));
   15559 	KASSERT(sc->sc_type == WM_T_PCH2);
   15560 
   15561 	/* Set MDIO slow mode before any other MDIO access */
   15562 	rv = wm_set_mdio_slow_mode_hv(sc);
   15563 	if (rv != 0)
   15564 		return rv;
   15565 
   15566 	rv = sc->phy.acquire(sc);
   15567 	if (rv != 0)
   15568 		return rv;
    15569 	/* Set MSE higher so the link stays up when noise is high */
   15570 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15571 	if (rv != 0)
   15572 		goto release;
    15573 	/* Drop the link after the MSE threshold has been reached 5 times */
   15574 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15575 release:
   15576 	sc->phy.release(sc);
   15577 
   15578 	return rv;
   15579 }
   15580 
   15581 /**
   15582  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15583  *  @link: link up bool flag
   15584  *
   15585  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15586  *  preventing further DMA write requests.  Workaround the issue by disabling
    15587  *  the de-assertion of the clock request when in 1Gbps mode.
   15588  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15589  *  speeds in order to avoid Tx hangs.
   15590  **/
   15591 static int
   15592 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15593 {
   15594 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15595 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15596 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15597 	uint16_t phyreg;
   15598 
   15599 	if (link && (speed == STATUS_SPEED_1000)) {
   15600 		sc->phy.acquire(sc);
   15601 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15602 		    &phyreg);
   15603 		if (rv != 0)
   15604 			goto release;
   15605 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15606 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15607 		if (rv != 0)
   15608 			goto release;
   15609 		delay(20);
   15610 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15611 
   15612 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15613 		    &phyreg);
   15614 release:
   15615 		sc->phy.release(sc);
   15616 		return rv;
   15617 	}
   15618 
   15619 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15620 
   15621 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15622 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15623 	    || !link
   15624 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15625 		goto update_fextnvm6;
   15626 
   15627 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15628 
   15629 	/* Clear link status transmit timeout */
   15630 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15631 	if (speed == STATUS_SPEED_100) {
   15632 		/* Set inband Tx timeout to 5x10us for 100Half */
   15633 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15634 
   15635 		/* Do not extend the K1 entry latency for 100Half */
   15636 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15637 	} else {
   15638 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15639 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15640 
   15641 		/* Extend the K1 entry latency for 10 Mbps */
   15642 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15643 	}
   15644 
   15645 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15646 
   15647 update_fextnvm6:
   15648 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15649 	return 0;
   15650 }
   15651 
   15652 /*
   15653  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15654  *  @sc:   pointer to the HW structure
   15655  *  @link: link up bool flag
   15656  *
   15657  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   15658  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   15659  *  If link is down, the function will restore the default K1 setting located
   15660  *  in the NVM.
   15661  */
   15662 static int
   15663 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15664 {
   15665 	int k1_enable = sc->sc_nvm_k1_enabled;
   15666 
   15667 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15668 		device_xname(sc->sc_dev), __func__));
   15669 
   15670 	if (sc->phy.acquire(sc) != 0)
   15671 		return -1;
   15672 
   15673 	if (link) {
   15674 		k1_enable = 0;
   15675 
   15676 		/* Link stall fix for link up */
   15677 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15678 		    0x0100);
   15679 	} else {
   15680 		/* Link stall fix for link down */
   15681 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15682 		    0x4100);
   15683 	}
   15684 
   15685 	wm_configure_k1_ich8lan(sc, k1_enable);
   15686 	sc->phy.release(sc);
   15687 
   15688 	return 0;
   15689 }
   15690 
   15691 /*
   15692  *  wm_k1_workaround_lv - K1 Si workaround
   15693  *  @sc:   pointer to the HW structure
   15694  *
   15695  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   15696  *  Disable K1 for 1000 and 100 speeds
   15697  */
   15698 static int
   15699 wm_k1_workaround_lv(struct wm_softc *sc)
   15700 {
   15701 	uint32_t reg;
   15702 	uint16_t phyreg;
   15703 	int rv;
   15704 
   15705 	if (sc->sc_type != WM_T_PCH2)
   15706 		return 0;
   15707 
   15708 	/* Set K1 beacon duration based on 10Mbps speed */
   15709 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15710 	if (rv != 0)
   15711 		return rv;
   15712 
   15713 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15714 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15715 		if (phyreg &
   15716 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    15717 			/* LV 1G/100 packet drop issue workaround */
   15718 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15719 			    &phyreg);
   15720 			if (rv != 0)
   15721 				return rv;
   15722 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15723 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15724 			    phyreg);
   15725 			if (rv != 0)
   15726 				return rv;
   15727 		} else {
   15728 			/* For 10Mbps */
   15729 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15730 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15731 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15732 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15733 		}
   15734 	}
   15735 
   15736 	return 0;
   15737 }
   15738 
   15739 /*
   15740  *  wm_link_stall_workaround_hv - Si workaround
   15741  *  @sc: pointer to the HW structure
   15742  *
   15743  *  This function works around a Si bug where the link partner can get
   15744  *  a link up indication before the PHY does. If small packets are sent
   15745  *  by the link partner they can be placed in the packet buffer without
   15746  *  being properly accounted for by the PHY and will stall preventing
   15747  *  further packets from being received.  The workaround is to clear the
   15748  *  packet buffer after the PHY detects link up.
   15749  */
   15750 static int
   15751 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15752 {
   15753 	uint16_t phyreg;
   15754 
   15755 	if (sc->sc_phytype != WMPHY_82578)
   15756 		return 0;
   15757 
    15758 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15759 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15760 	if ((phyreg & BMCR_LOOP) != 0)
   15761 		return 0;
   15762 
    15763 	/* Check if the link is up and at 1Gbps */
   15764 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15765 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15766 	    | BM_CS_STATUS_SPEED_MASK;
   15767 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15768 		| BM_CS_STATUS_SPEED_1000))
   15769 		return 0;
   15770 
   15771 	delay(200 * 1000);	/* XXX too big */
   15772 
    15773 	/* Flush the packets in the FIFO buffer */
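          	/*
          	 * Note (added): writing HV_MUX_DATA_CTRL with FORCE_SPEED set
          	 * and then cleared presumably makes the PHY hand its buffered
          	 * packets to the MAC, emptying the packet buffer that caused
          	 * the stall.
          	 */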
   15774 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15775 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15776 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15777 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15778 
   15779 	return 0;
   15780 }
   15781 
   15782 static int
   15783 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15784 {
   15785 	int rv;
   15786 	uint16_t reg;
   15787 
   15788 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15789 	if (rv != 0)
   15790 		return rv;
   15791 
    15792 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15793 	    reg | HV_KMRN_MDIO_SLOW);
   15794 }
   15795 
   15796 /*
   15797  *  wm_configure_k1_ich8lan - Configure K1 power state
   15798  *  @sc: pointer to the HW structure
   15799  *  @enable: K1 state to configure
   15800  *
   15801  *  Configure the K1 power state based on the provided parameter.
   15802  *  Assumes semaphore already acquired.
   15803  */
   15804 static void
   15805 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15806 {
   15807 	uint32_t ctrl, ctrl_ext, tmp;
   15808 	uint16_t kmreg;
   15809 	int rv;
   15810 
   15811 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15812 
   15813 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15814 	if (rv != 0)
   15815 		return;
   15816 
   15817 	if (k1_enable)
   15818 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15819 	else
   15820 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15821 
   15822 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15823 	if (rv != 0)
   15824 		return;
   15825 
   15826 	delay(20);
   15827 
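          	/*
          	 * Note (added): the sequence below briefly forces the MAC
          	 * speed with CTRL_FRCSPD and the speed-bypass bit set, then
          	 * restores the original CTRL/CTRL_EXT values.  The apparent
          	 * intent is to latch the new K1 setting without a real link
          	 * renegotiation.
          	 */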
   15828 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15829 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15830 
   15831 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15832 	tmp |= CTRL_FRCSPD;
   15833 
   15834 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15835 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15836 	CSR_WRITE_FLUSH(sc);
   15837 	delay(20);
   15838 
   15839 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15840 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15841 	CSR_WRITE_FLUSH(sc);
   15842 	delay(20);
   15843 
   15844 	return;
   15845 }
   15846 
   15847 /* special case - for 82575 - need to do manual init ... */
   15848 static void
   15849 wm_reset_init_script_82575(struct wm_softc *sc)
   15850 {
   15851 	/*
    15852 	 * Remark: this is untested code - we have no board without EEPROM.
    15853 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   15854 	 */
   15855 
   15856 	/* SerDes configuration via SERDESCTRL */
   15857 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15858 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15859 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15860 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15861 
   15862 	/* CCM configuration via CCMCTL register */
   15863 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15864 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15865 
   15866 	/* PCIe lanes configuration */
   15867 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15868 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15869 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15870 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15871 
   15872 	/* PCIe PLL Configuration */
   15873 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15874 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15875 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15876 }
   15877 
   15878 static void
   15879 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15880 {
   15881 	uint32_t reg;
   15882 	uint16_t nvmword;
   15883 	int rv;
   15884 
   15885 	if (sc->sc_type != WM_T_82580)
   15886 		return;
   15887 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15888 		return;
   15889 
   15890 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15891 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15892 	if (rv != 0) {
   15893 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15894 		    __func__);
   15895 		return;
   15896 	}
   15897 
   15898 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15899 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15900 		reg |= MDICNFG_DEST;
   15901 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15902 		reg |= MDICNFG_COM_MDIO;
   15903 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15904 }
   15905 
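          /* A PHY ID of all zeros or all ones indicates a failed MDIO read. */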
   15906 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15907 
   15908 static bool
   15909 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15910 {
   15911 	uint32_t reg;
   15912 	uint16_t id1, id2;
   15913 	int i, rv;
   15914 
   15915 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15916 		device_xname(sc->sc_dev), __func__));
   15917 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15918 
   15919 	id1 = id2 = 0xffff;
   15920 	for (i = 0; i < 2; i++) {
   15921 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15922 		    &id1);
   15923 		if ((rv != 0) || MII_INVALIDID(id1))
   15924 			continue;
   15925 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15926 		    &id2);
   15927 		if ((rv != 0) || MII_INVALIDID(id2))
   15928 			continue;
   15929 		break;
   15930 	}
   15931 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15932 		goto out;
   15933 
   15934 	/*
   15935 	 * In case the PHY needs to be in mdio slow mode,
   15936 	 * set slow mode and try to get the PHY id again.
   15937 	 */
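          	/*
          	 * Note (added): the semaphore is dropped around this retry
          	 * because the non-locked wm_set_mdio_slow_mode_hv() and
          	 * wm_gmii_hv_readreg() helpers acquire it themselves; it is
          	 * re-acquired before returning to the locked context.
          	 */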
   15938 	rv = 0;
   15939 	if (sc->sc_type < WM_T_PCH_LPT) {
   15940 		sc->phy.release(sc);
   15941 		wm_set_mdio_slow_mode_hv(sc);
   15942 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15943 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15944 		sc->phy.acquire(sc);
   15945 	}
   15946 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15947 		printf("XXX return with false\n");
   15948 		return false;
   15949 	}
   15950 out:
   15951 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15952 		/* Only unforce SMBus if ME is not active */
   15953 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15954 			uint16_t phyreg;
   15955 
   15956 			/* Unforce SMBus mode in PHY */
   15957 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15958 			    CV_SMB_CTRL, &phyreg);
   15959 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15960 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15961 			    CV_SMB_CTRL, phyreg);
   15962 
   15963 			/* Unforce SMBus mode in MAC */
   15964 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15965 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15966 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15967 		}
   15968 	}
   15969 	return true;
   15970 }
   15971 
   15972 static void
   15973 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15974 {
   15975 	uint32_t reg;
   15976 	int i;
   15977 
   15978 	/* Set PHY Config Counter to 50msec */
   15979 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15980 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15981 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15982 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15983 
   15984 	/* Toggle LANPHYPC */
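          	/*
          	 * Note (added): driving LANPHYPC with OVERRIDE set and VALUE
          	 * clear power-cycles the PHY; clearing OVERRIDE afterwards
          	 * returns control of the pin to the hardware.  On PCH_LPT and
          	 * newer the code below then polls CTRL_EXT_LPCD for completion
          	 * instead of sleeping a fixed 50ms.
          	 */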
   15985 	reg = CSR_READ(sc, WMREG_CTRL);
   15986 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15987 	reg &= ~CTRL_LANPHYPC_VALUE;
   15988 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15989 	CSR_WRITE_FLUSH(sc);
   15990 	delay(1000);
   15991 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15992 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15993 	CSR_WRITE_FLUSH(sc);
   15994 
   15995 	if (sc->sc_type < WM_T_PCH_LPT)
   15996 		delay(50 * 1000);
   15997 	else {
   15998 		i = 20;
   15999 
   16000 		do {
   16001 			delay(5 * 1000);
   16002 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16003 		    && i--);
   16004 
   16005 		delay(30 * 1000);
   16006 	}
   16007 }
   16008 
   16009 static int
   16010 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16011 {
   16012 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16013 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16014 	uint32_t rxa;
   16015 	uint16_t scale = 0, lat_enc = 0;
   16016 	int32_t obff_hwm = 0;
   16017 	int64_t lat_ns, value;
   16018 
   16019 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16020 		device_xname(sc->sc_dev), __func__));
   16021 
   16022 	if (link) {
   16023 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16024 		uint32_t status;
   16025 		uint16_t speed;
   16026 		pcireg_t preg;
   16027 
   16028 		status = CSR_READ(sc, WMREG_STATUS);
   16029 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16030 		case STATUS_SPEED_10:
   16031 			speed = 10;
   16032 			break;
   16033 		case STATUS_SPEED_100:
   16034 			speed = 100;
   16035 			break;
   16036 		case STATUS_SPEED_1000:
   16037 			speed = 1000;
   16038 			break;
   16039 		default:
   16040 			device_printf(sc->sc_dev, "Unknown speed "
   16041 			    "(status = %08x)\n", status);
   16042 			return -1;
   16043 		}
   16044 
   16045 		/* Rx Packet Buffer Allocation size (KB) */
   16046 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16047 
   16048 		/*
   16049 		 * Determine the maximum latency tolerated by the device.
   16050 		 *
   16051 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16052 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16053 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16054 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16055 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16056 		 */
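          		/*
          		 * Worked example (added, made-up numbers): for
          		 * lat_ns = 100000, the while loop below repeatedly
          		 * ceiling-divides by 2^5 until the value fits in 10 bits:
          		 * 100000 -> 3125 -> 98, giving scale = 2 and value = 98,
          		 * which encodes 98 * 2^(5*2) = 100352 ns (at least the
          		 * requested latency, as the ceiling division guarantees).
          		 */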
   16057 		lat_ns = ((int64_t)rxa * 1024 -
   16058 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16059 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16060 		if (lat_ns < 0)
   16061 			lat_ns = 0;
   16062 		else
   16063 			lat_ns /= speed;
   16064 		value = lat_ns;
   16065 
   16066 		while (value > LTRV_VALUE) {
    16067 			scale++;
   16068 			value = howmany(value, __BIT(5));
   16069 		}
   16070 		if (scale > LTRV_SCALE_MAX) {
   16071 			printf("%s: Invalid LTR latency scale %d\n",
   16072 			    device_xname(sc->sc_dev), scale);
   16073 			return -1;
   16074 		}
   16075 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16076 
   16077 		/* Determine the maximum latency tolerated by the platform */
   16078 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16079 		    WM_PCI_LTR_CAP_LPT);
   16080 		max_snoop = preg & 0xffff;
   16081 		max_nosnoop = preg >> 16;
   16082 
   16083 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16084 
   16085 		if (lat_enc > max_ltr_enc) {
   16086 			lat_enc = max_ltr_enc;
   16087 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16088 			    * PCI_LTR_SCALETONS(
   16089 				    __SHIFTOUT(lat_enc,
   16090 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16091 		}
   16092 
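          		/*
          		 * Note (added): the arithmetic below converts the
          		 * tolerated latency back into the amount of buffer (in
          		 * KB) that drains at link speed during that time:
          		 * ns * Mb/s * 1000 / 8 / 10^9 = KB.  The OBFF high water
          		 * mark is then the Rx buffer size minus that amount.
          		 */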
   16093 		if (lat_ns) {
   16094 			lat_ns *= speed * 1000;
   16095 			lat_ns /= 8;
   16096 			lat_ns /= 1000000000;
   16097 			obff_hwm = (int32_t)(rxa - lat_ns);
   16098 		}
   16099 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    16100 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   16101 			    "(rxa = %d, lat_ns = %d)\n",
   16102 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16103 			return -1;
   16104 		}
   16105 	}
   16106 	/* Snoop and No-Snoop latencies the same */
   16107 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16108 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16109 
   16110 	/* Set OBFF high water mark */
   16111 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16112 	reg |= obff_hwm;
   16113 	CSR_WRITE(sc, WMREG_SVT, reg);
   16114 
   16115 	/* Enable OBFF */
   16116 	reg = CSR_READ(sc, WMREG_SVCR);
   16117 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16118 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16119 
   16120 	return 0;
   16121 }
   16122 
   16123 /*
   16124  * I210 Errata 25 and I211 Errata 10
   16125  * Slow System Clock.
   16126  */
   16127 static int
   16128 wm_pll_workaround_i210(struct wm_softc *sc)
   16129 {
   16130 	uint32_t mdicnfg, wuc;
   16131 	uint32_t reg;
   16132 	pcireg_t pcireg;
   16133 	uint32_t pmreg;
   16134 	uint16_t nvmword, tmp_nvmword;
   16135 	uint16_t phyval;
   16136 	bool wa_done = false;
   16137 	int i, rv = 0;
   16138 
   16139 	/* Get Power Management cap offset */
   16140 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16141 	    &pmreg, NULL) == 0)
   16142 		return -1;
   16143 
   16144 	/* Save WUC and MDICNFG registers */
   16145 	wuc = CSR_READ(sc, WMREG_WUC);
   16146 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16147 
   16148 	reg = mdicnfg & ~MDICNFG_DEST;
   16149 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16150 
   16151 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16152 		nvmword = INVM_DEFAULT_AL;
   16153 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16154 
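          	/*
          	 * Note (added): each iteration below appears to check whether
          	 * the internal PHY's PLL came up configured; if not, it resets
          	 * the PHY, loads the autoload word with the workaround bit set
          	 * via EEARBC_I210, bounces the device through D3hot/D0 to
          	 * re-trigger the autoload, and then restores the original word
          	 * for the next check.
          	 */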
   16155 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16156 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16157 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16158 
   16159 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16160 			rv = 0;
   16161 			break; /* OK */
   16162 		} else
   16163 			rv = -1;
   16164 
   16165 		wa_done = true;
   16166 		/* Directly reset the internal PHY */
   16167 		reg = CSR_READ(sc, WMREG_CTRL);
   16168 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16169 
   16170 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16171 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16172 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16173 
   16174 		CSR_WRITE(sc, WMREG_WUC, 0);
   16175 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16176 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16177 
   16178 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16179 		    pmreg + PCI_PMCSR);
   16180 		pcireg |= PCI_PMCSR_STATE_D3;
   16181 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16182 		    pmreg + PCI_PMCSR, pcireg);
   16183 		delay(1000);
   16184 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16185 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16186 		    pmreg + PCI_PMCSR, pcireg);
   16187 
   16188 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16189 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16190 
   16191 		/* Restore WUC register */
   16192 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16193 	}
   16194 
   16195 	/* Restore MDICNFG setting */
   16196 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16197 	if (wa_done)
   16198 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16199 	return rv;
   16200 }
   16201 
   16202 static void
   16203 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16204 {
   16205 	uint32_t reg;
   16206 
   16207 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16208 		device_xname(sc->sc_dev), __func__));
   16209 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16210 	    || (sc->sc_type == WM_T_PCH_CNP));
   16211 
   16212 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16213 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16214 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16215 
   16216 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16217 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16218 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16219 }
   16220