/*	$NetBSD: if_wm.c,v 1.637 2019/05/23 10:57:28 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy-Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.637 2019/05/23 10:57:28 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
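
/*
 * Illustrative usage (not itself driver code): DPRINTF() takes the debug
 * category mask and a parenthesized printf argument list, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 *
 * The extra parentheses let a variable-length argument list pass through
 * the single macro parameter 'y' without requiring __VA_ARGS__.
 */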

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * This device driver's max interrupt numbers.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
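
/*
 * Illustrative sketch (not part of the driver): because txq_ndesc is a
 * power of two, "& WM_NTXDESC_MASK(txq)" is a cheap modulo, so the ring
 * index wraps without a branch.  With WM_NTXDESC(txq) == 256:
 *
 *	WM_NEXTTX(txq, 10)  == 11	(11  & 0xff)
 *	WM_NEXTTX(txq, 255) == 0	(256 & 0xff)
 *
 * The Tx job ring wraps the same way via WM_NEXTTXS().
 */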

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
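
/*
 * Worked example (illustrative): a ~9k jumbo frame needs
 * ceil(9018 / MCLBYTES) = ceil(9018 / 2048) = 5 clusters, which is
 * where the "5 Rx buffers" and "room for 50 jumbo packets" figures
 * above come from.  The mask also wraps backwards:
 * WM_PREVRX(0) == 255, because (-1 & 0xff) == 0xff.
 */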

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
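
/*
 * Illustrative expansion (assuming WM_EVENT_COUNTERS): inside a struct,
 * WM_Q_EVCNT_DEFINE(txq, tso) declares
 *
 *	char txq_tso_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_tso;
 *
 * (token pasting does not occur inside the string literal, so the name
 * buffer has a fixed size), and WM_Q_EVCNT_ATTACH(txq, tso, txq, 0,
 * xname, EVCNT_TYPE_MISC) formats "txq00tso" into that buffer before
 * registering the counter with evcnt_attach_dynamic(9).
 */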

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
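
/*
 * Illustrative note: these op vectors let chip-specific semaphore and
 * register-access routines be selected once at attach time.  A
 * hypothetical sketch of how they could be used:
 *
 *	sc->phy.acquire = wm_get_phy_82575;
 *	sc->phy.release = wm_put_phy_82575;
 *	...
 *	if (sc->phy.acquire(sc) == 0) {
 *		rv = sc->phy.readreg_locked(dev, phy, reg, &val);
 *		sc->phy.release(sc);
 *	}
 */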

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
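
/*
 * Illustrative sketch (not driver code): WM_RXCHAIN_LINK() appends an
 * mbuf in O(1) by keeping rxq_tailp aimed at the final m_next slot.
 * After WM_RXCHAIN_RESET(rxq) and then linking m1 and m2:
 *
 *	rxq_head == m1, m1->m_next == m2, rxq_tail == m2,
 *	rxq_tailp == &m2->m_next
 *
 * so the next append needs no walk of the chain.
 */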

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
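
/*
 * Usage note (illustrative): qname doubles as both the pointer variable
 * and the field-name prefix, so the argument must literally be named
 * "txq" or "rxq".  For example,
 *
 *	WM_Q_EVCNT_INCR(txq, tso);
 *
 * expands to WM_EVCNT_INCR(&(txq)->txq_ev_tso).
 */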

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
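
/*
 * Illustrative sketch: a posted register write followed by a flush,
 * e.g.
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 *
 * Reading WMREG_STATUS back forces the preceding write out of any
 * PCI(e) posting buffers before the driver continues.
 */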

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
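
/*
 * Worked example (illustrative): descriptor ring base addresses are
 * programmed into the chip as two 32-bit register writes, so a 64-bit
 * DMA address such as 0x123456780 splits into
 *
 *	WM_CDTXADDR_LO() == 0x23456780	(low 32 bits)
 *	WM_CDTXADDR_HI() == 0x1		(high 32 bits)
 *
 * On platforms with a 32-bit bus_addr_t the HI half is compile-time
 * zero.
 */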

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1324 	  "82567LM-3 LAN Controller",
   1325 	  WM_T_ICH10,		WMP_F_COPPER },
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1327 	  "82567LF-3 LAN Controller",
   1328 	  WM_T_ICH10,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1330 	  "82567V-2 LAN Controller",
   1331 	  WM_T_ICH10,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1333 	  "82567V-3? LAN Controller",
   1334 	  WM_T_ICH10,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1336 	  "HANKSVILLE LAN Controller",
   1337 	  WM_T_ICH10,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1339 	  "PCH LAN (82577LM) Controller",
   1340 	  WM_T_PCH,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1342 	  "PCH LAN (82577LC) Controller",
   1343 	  WM_T_PCH,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1345 	  "PCH LAN (82578DM) Controller",
   1346 	  WM_T_PCH,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1348 	  "PCH LAN (82578DC) Controller",
   1349 	  WM_T_PCH,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1351 	  "PCH2 LAN (82579LM) Controller",
   1352 	  WM_T_PCH2,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1354 	  "PCH2 LAN (82579V) Controller",
   1355 	  WM_T_PCH2,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1357 	  "82575EB dual-1000baseT Ethernet",
   1358 	  WM_T_82575,		WMP_F_COPPER },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1360 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1361 	  WM_T_82575,		WMP_F_SERDES },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1363 	  "82575GB quad-1000baseT Ethernet",
   1364 	  WM_T_82575,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1366 	  "82575GB quad-1000baseT Ethernet (PM)",
   1367 	  WM_T_82575,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1369 	  "82576 1000BaseT Ethernet",
   1370 	  WM_T_82576,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1372 	  "82576 1000BaseX Ethernet",
   1373 	  WM_T_82576,		WMP_F_FIBER },
   1374 
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1376 	  "82576 gigabit Ethernet (SERDES)",
   1377 	  WM_T_82576,		WMP_F_SERDES },
   1378 
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1380 	  "82576 quad-1000BaseT Ethernet",
   1381 	  WM_T_82576,		WMP_F_COPPER },
   1382 
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1384 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1385 	  WM_T_82576,		WMP_F_COPPER },
   1386 
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1388 	  "82576 gigabit Ethernet",
   1389 	  WM_T_82576,		WMP_F_COPPER },
   1390 
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1392 	  "82576 gigabit Ethernet (SERDES)",
   1393 	  WM_T_82576,		WMP_F_SERDES },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1395 	  "82576 quad-gigabit Ethernet (SERDES)",
   1396 	  WM_T_82576,		WMP_F_SERDES },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1399 	  "82580 1000BaseT Ethernet",
   1400 	  WM_T_82580,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1402 	  "82580 1000BaseX Ethernet",
   1403 	  WM_T_82580,		WMP_F_FIBER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1406 	  "82580 1000BaseT Ethernet (SERDES)",
   1407 	  WM_T_82580,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1410 	  "82580 gigabit Ethernet (SGMII)",
   1411 	  WM_T_82580,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1413 	  "82580 dual-1000BaseT Ethernet",
   1414 	  WM_T_82580,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1417 	  "82580 quad-1000BaseX Ethernet",
   1418 	  WM_T_82580,		WMP_F_FIBER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1421 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1422 	  WM_T_82580,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1425 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1426 	  WM_T_82580,		WMP_F_SERDES },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1429 	  "DH89XXCC 1000BASE-KX Ethernet",
   1430 	  WM_T_82580,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1433 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1434 	  WM_T_82580,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1437 	  "I350 Gigabit Network Connection",
   1438 	  WM_T_I350,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1441 	  "I350 Gigabit Fiber Network Connection",
   1442 	  WM_T_I350,		WMP_F_FIBER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1445 	  "I350 Gigabit Backplane Connection",
   1446 	  WM_T_I350,		WMP_F_SERDES },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1449 	  "I350 Quad Port Gigabit Ethernet",
   1450 	  WM_T_I350,		WMP_F_SERDES },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1453 	  "I350 Gigabit Connection",
   1454 	  WM_T_I350,		WMP_F_COPPER },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1457 	  "I354 Gigabit Ethernet (KX)",
   1458 	  WM_T_I354,		WMP_F_SERDES },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1461 	  "I354 Gigabit Ethernet (SGMII)",
   1462 	  WM_T_I354,		WMP_F_COPPER },
   1463 
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1465 	  "I354 Gigabit Ethernet (2.5G)",
   1466 	  WM_T_I354,		WMP_F_COPPER },
   1467 
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1469 	  "I210-T1 Ethernet Server Adapter",
   1470 	  WM_T_I210,		WMP_F_COPPER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1473 	  "I210 Ethernet (Copper OEM)",
   1474 	  WM_T_I210,		WMP_F_COPPER },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1477 	  "I210 Ethernet (Copper IT)",
   1478 	  WM_T_I210,		WMP_F_COPPER },
   1479 
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1481 	  "I210 Ethernet (Copper, FLASH less)",
   1482 	  WM_T_I210,		WMP_F_COPPER },
   1483 
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1485 	  "I210 Gigabit Ethernet (Fiber)",
   1486 	  WM_T_I210,		WMP_F_FIBER },
   1487 
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1489 	  "I210 Gigabit Ethernet (SERDES)",
   1490 	  WM_T_I210,		WMP_F_SERDES },
   1491 
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1493 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1494 	  WM_T_I210,		WMP_F_SERDES },
   1495 
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1497 	  "I210 Gigabit Ethernet (SGMII)",
   1498 	  WM_T_I210,		WMP_F_COPPER },
   1499 
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1501 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1502 	  WM_T_I210,		WMP_F_COPPER },
   1503 
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1505 	  "I211 Ethernet (COPPER)",
   1506 	  WM_T_I211,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1508 	  "I217 V Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1511 	  "I217 LM Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1517 	  "I218 V Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1520 	  "I218 V Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1526 	  "I218 LM Ethernet Connection",
   1527 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1529 	  "I218 LM Ethernet Connection",
   1530 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1532 	  "I219 LM Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1535 	  "I219 LM Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1538 	  "I219 LM Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1553 	  "I219 LM Ethernet Connection",
   1554 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1556 	  "I219 LM Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1559 	  "I219 V Ethernet Connection",
   1560 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1562 	  "I219 V Ethernet Connection",
   1563 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1565 	  "I219 V Ethernet Connection",
   1566 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1568 	  "I219 V Ethernet Connection",
   1569 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1570 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1571 	  "I219 V Ethernet Connection",
   1572 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1574 	  "I219 V Ethernet Connection",
   1575 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1577 	  "I219 V Ethernet Connection",
   1578 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1580 	  "I219 V Ethernet Connection",
   1581 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1582 	{ 0,			0,
   1583 	  NULL,
   1584 	  0,			0 },
   1585 };
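
/*
 * The all-zero entry above terminates the table; wm_lookup()
 * stops when it reaches an entry whose wmp_name is NULL.
 */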
   1586 
   1587 /*
   1588  * Register read/write functions.
   1589  * Other than CSR_{READ|WRITE}().
   1590  */
   1591 
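/*
 * A note on the two helpers below: the chip's I/O BAR provides
 * indirect register access.  The register offset is written at
 * offset 0 of the I/O space and the data is then read or written
 * at offset 4.
 */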
   1592 #if 0 /* Not currently used */
   1593 static inline uint32_t
   1594 wm_io_read(struct wm_softc *sc, int reg)
   1595 {
   1596 
   1597 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1598 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1599 }
   1600 #endif
   1601 
   1602 static inline void
   1603 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1604 {
   1605 
   1606 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1607 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1608 }
   1609 
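/*
 * Write an 8-bit value to an indirect 8-bit controller register and
 * poll until the chip reports the access complete, waiting up to
 * SCTL_CTL_POLL_TIMEOUT * 5 microseconds.
 */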
   1610 static inline void
   1611 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1612     uint32_t data)
   1613 {
   1614 	uint32_t regval;
   1615 	int i;
   1616 
   1617 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1618 
   1619 	CSR_WRITE(sc, reg, regval);
   1620 
   1621 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1622 		delay(5);
   1623 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1624 			break;
   1625 	}
   1626 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1627 		aprint_error("%s: WARNING:"
   1628 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1629 		    device_xname(sc->sc_dev), reg);
   1630 	}
   1631 }
   1632 
   1633 static inline void
   1634 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1635 {
   1636 	wa->wa_low = htole32(v & 0xffffffffU);
   1637 	if (sizeof(bus_addr_t) == 8)
   1638 		wa->wa_high = htole32((uint64_t) v >> 32);
   1639 	else
   1640 		wa->wa_high = 0;
   1641 }
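
/*
 * Example: with v = 0x123456789, wa_low becomes htole32(0x23456789)
 * and wa_high becomes htole32(0x1).  On platforms where bus_addr_t
 * is 32 bits wide, wa_high is always 0.
 */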
   1642 
   1643 /*
   1644  * Descriptor sync/init functions.
   1645  */
   1646 static inline void
   1647 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1648 {
   1649 	struct wm_softc *sc = txq->txq_sc;
   1650 
   1651 	/* If it will wrap around, sync to the end of the ring. */
   1652 	if ((start + num) > WM_NTXDESC(txq)) {
   1653 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1654 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1655 		    (WM_NTXDESC(txq) - start), ops);
   1656 		num -= (WM_NTXDESC(txq) - start);
   1657 		start = 0;
   1658 	}
   1659 
   1660 	/* Now sync whatever is left. */
   1661 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1662 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1663 }
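
/*
 * Example: on a 256-descriptor ring, a call with start = 250 and
 * num = 10 first syncs descriptors 250-255, then wraps around and
 * syncs descriptors 0-3.
 */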
   1664 
   1665 static inline void
   1666 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1667 {
   1668 	struct wm_softc *sc = rxq->rxq_sc;
   1669 
   1670 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1671 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1672 }
   1673 
   1674 static inline void
   1675 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1676 {
   1677 	struct wm_softc *sc = rxq->rxq_sc;
   1678 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1679 	struct mbuf *m = rxs->rxs_mbuf;
   1680 
   1681 	/*
   1682 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1683 	 * so that the payload after the Ethernet header is aligned
   1684 	 * to a 4-byte boundary.
    1685 	 *
   1686 	 * XXX BRAINDAMAGE ALERT!
   1687 	 * The stupid chip uses the same size for every buffer, which
   1688 	 * is set in the Receive Control register.  We are using the 2K
   1689 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1690 	 * reason, we can't "scoot" packets longer than the standard
   1691 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1692 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1693 	 * the upper layer copy the headers.
   1694 	 */
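	/*
	 * Example: with the 2-byte tweak the 14-byte Ethernet header
	 * ends at offset 16, so the IP header that follows starts on
	 * a 4-byte boundary.
	 */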
   1695 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1696 
   1697 	if (sc->sc_type == WM_T_82574) {
   1698 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1699 		rxd->erx_data.erxd_addr =
   1700 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1701 		rxd->erx_data.erxd_dd = 0;
   1702 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1703 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1704 
   1705 		rxd->nqrx_data.nrxd_paddr =
   1706 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1707 		/* Currently, split header is not supported. */
   1708 		rxd->nqrx_data.nrxd_haddr = 0;
   1709 	} else {
   1710 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1711 
   1712 		wm_set_dma_addr(&rxd->wrx_addr,
   1713 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1714 		rxd->wrx_len = 0;
   1715 		rxd->wrx_cksum = 0;
   1716 		rxd->wrx_status = 0;
   1717 		rxd->wrx_errors = 0;
   1718 		rxd->wrx_special = 0;
   1719 	}
   1720 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1721 
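	/* Hand the descriptor to the chip by advancing the RX tail. */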
   1722 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1723 }
   1724 
   1725 /*
   1726  * Device driver interface functions and commonly used functions.
   1727  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1728  */
   1729 
    1730 /* Look up a device in the supported device table */
   1731 static const struct wm_product *
   1732 wm_lookup(const struct pci_attach_args *pa)
   1733 {
   1734 	const struct wm_product *wmp;
   1735 
   1736 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1737 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1738 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1739 			return wmp;
   1740 	}
   1741 	return NULL;
   1742 }
   1743 
   1744 /* The match function (ca_match) */
   1745 static int
   1746 wm_match(device_t parent, cfdata_t cf, void *aux)
   1747 {
   1748 	struct pci_attach_args *pa = aux;
   1749 
   1750 	if (wm_lookup(pa) != NULL)
   1751 		return 1;
   1752 
   1753 	return 0;
   1754 }
   1755 
   1756 /* The attach function (ca_attach) */
   1757 static void
   1758 wm_attach(device_t parent, device_t self, void *aux)
   1759 {
   1760 	struct wm_softc *sc = device_private(self);
   1761 	struct pci_attach_args *pa = aux;
   1762 	prop_dictionary_t dict;
   1763 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1764 	pci_chipset_tag_t pc = pa->pa_pc;
   1765 	int counts[PCI_INTR_TYPE_SIZE];
   1766 	pci_intr_type_t max_type;
   1767 	const char *eetype, *xname;
   1768 	bus_space_tag_t memt;
   1769 	bus_space_handle_t memh;
   1770 	bus_size_t memsize;
   1771 	int memh_valid;
   1772 	int i, error;
   1773 	const struct wm_product *wmp;
   1774 	prop_data_t ea;
   1775 	prop_number_t pn;
   1776 	uint8_t enaddr[ETHER_ADDR_LEN];
   1777 	char buf[256];
   1778 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1779 	pcireg_t preg, memtype;
   1780 	uint16_t eeprom_data, apme_mask;
   1781 	bool force_clear_smbi;
   1782 	uint32_t link_mode;
   1783 	uint32_t reg;
   1784 
   1785 	sc->sc_dev = self;
   1786 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1787 	sc->sc_core_stopping = false;
   1788 
   1789 	wmp = wm_lookup(pa);
   1790 #ifdef DIAGNOSTIC
   1791 	if (wmp == NULL) {
   1792 		printf("\n");
   1793 		panic("wm_attach: impossible");
   1794 	}
   1795 #endif
   1796 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1797 
   1798 	sc->sc_pc = pa->pa_pc;
   1799 	sc->sc_pcitag = pa->pa_tag;
   1800 
   1801 	if (pci_dma64_available(pa))
   1802 		sc->sc_dmat = pa->pa_dmat64;
   1803 	else
   1804 		sc->sc_dmat = pa->pa_dmat;
   1805 
   1806 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1807 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1808 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1809 
   1810 	sc->sc_type = wmp->wmp_type;
   1811 
   1812 	/* Set default function pointers */
   1813 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1814 	sc->phy.release = sc->nvm.release = wm_put_null;
   1815 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1816 
   1817 	if (sc->sc_type < WM_T_82543) {
   1818 		if (sc->sc_rev < 2) {
   1819 			aprint_error_dev(sc->sc_dev,
   1820 			    "i82542 must be at least rev. 2\n");
   1821 			return;
   1822 		}
   1823 		if (sc->sc_rev < 3)
   1824 			sc->sc_type = WM_T_82542_2_0;
   1825 	}
   1826 
   1827 	/*
   1828 	 * Disable MSI for Errata:
   1829 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1830 	 *
   1831 	 *  82544: Errata 25
   1832 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1833 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1834 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1835 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1836 	 *
   1837 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1838 	 *
   1839 	 *  82571 & 82572: Errata 63
   1840 	 */
   1841 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1842 	    || (sc->sc_type == WM_T_82572))
   1843 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1844 
   1845 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1846 	    || (sc->sc_type == WM_T_82580)
   1847 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1848 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1849 		sc->sc_flags |= WM_F_NEWQUEUE;
   1850 
   1851 	/* Set device properties (mactype) */
   1852 	dict = device_properties(sc->sc_dev);
   1853 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1854 
   1855 	/*
    1856 	 * Map the device.  All devices support memory-mapped access,
    1857 	 * which is required for normal operation.
   1858 	 */
   1859 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1860 	switch (memtype) {
   1861 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1862 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1863 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1864 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1865 		break;
   1866 	default:
   1867 		memh_valid = 0;
   1868 		break;
   1869 	}
   1870 
   1871 	if (memh_valid) {
   1872 		sc->sc_st = memt;
   1873 		sc->sc_sh = memh;
   1874 		sc->sc_ss = memsize;
   1875 	} else {
   1876 		aprint_error_dev(sc->sc_dev,
   1877 		    "unable to map device registers\n");
   1878 		return;
   1879 	}
   1880 
   1881 	/*
   1882 	 * In addition, i82544 and later support I/O mapped indirect
   1883 	 * register access.  It is not desirable (nor supported in
   1884 	 * this driver) to use it for normal operation, though it is
   1885 	 * required to work around bugs in some chip versions.
   1886 	 */
   1887 	if (sc->sc_type >= WM_T_82544) {
   1888 		/* First we have to find the I/O BAR. */
   1889 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1890 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1891 			if (memtype == PCI_MAPREG_TYPE_IO)
   1892 				break;
   1893 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1894 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1895 				i += 4;	/* skip high bits, too */
   1896 		}
   1897 		if (i < PCI_MAPREG_END) {
   1898 			/*
    1899 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1900 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1901 			 * That's no problem, because those newer chips don't
    1902 			 * have this bug.
    1903 			 *
    1904 			 * The i8254x apparently doesn't respond when the
    1905 			 * I/O BAR is 0, which looks as if it hasn't
    1906 			 * been configured.
   1907 			 */
   1908 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1909 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1910 				aprint_error_dev(sc->sc_dev,
   1911 				    "WARNING: I/O BAR at zero.\n");
   1912 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1913 					0, &sc->sc_iot, &sc->sc_ioh,
   1914 					NULL, &sc->sc_ios) == 0) {
   1915 				sc->sc_flags |= WM_F_IOH_VALID;
   1916 			} else
   1917 				aprint_error_dev(sc->sc_dev,
   1918 				    "WARNING: unable to map I/O space\n");
   1919 		}
   1920 
   1921 	}
   1922 
   1923 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1924 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1925 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1926 	if (sc->sc_type < WM_T_82542_2_1)
   1927 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1928 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1929 
   1930 	/* Power up chip */
   1931 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1932 	    && error != EOPNOTSUPP) {
   1933 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1934 		return;
   1935 	}
   1936 
   1937 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1938 	/*
    1939 	 * Don't use MSI-X if we can use only one queue, to save
    1940 	 * interrupt resources.
   1941 	 */
   1942 	if (sc->sc_nqueues > 1) {
   1943 		max_type = PCI_INTR_TYPE_MSIX;
   1944 		/*
    1945 		 * The 82583 has an MSI-X capability in its PCI configuration
    1946 		 * space but doesn't actually support MSI-X; at least the
    1947 		 * documentation says nothing about MSI-X.
   1948 		 */
   1949 		counts[PCI_INTR_TYPE_MSIX]
   1950 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1951 	} else {
   1952 		max_type = PCI_INTR_TYPE_MSI;
   1953 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1954 	}
   1955 
   1956 	/* Allocation settings */
   1957 	counts[PCI_INTR_TYPE_MSI] = 1;
   1958 	counts[PCI_INTR_TYPE_INTX] = 1;
   1959 	/* overridden by disable flags */
   1960 	if (wm_disable_msi != 0) {
   1961 		counts[PCI_INTR_TYPE_MSI] = 0;
   1962 		if (wm_disable_msix != 0) {
   1963 			max_type = PCI_INTR_TYPE_INTX;
   1964 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1965 		}
   1966 	} else if (wm_disable_msix != 0) {
   1967 		max_type = PCI_INTR_TYPE_MSI;
   1968 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1969 	}
   1970 
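	/*
	 * Interrupt allocation falls back MSI-X -> MSI -> INTx: when
	 * setup for one type fails, the vectors are released and we
	 * retry here with the next type.
	 */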
   1971 alloc_retry:
   1972 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1973 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1974 		return;
   1975 	}
   1976 
   1977 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1978 		error = wm_setup_msix(sc);
   1979 		if (error) {
   1980 			pci_intr_release(pc, sc->sc_intrs,
   1981 			    counts[PCI_INTR_TYPE_MSIX]);
   1982 
   1983 			/* Setup for MSI: Disable MSI-X */
   1984 			max_type = PCI_INTR_TYPE_MSI;
   1985 			counts[PCI_INTR_TYPE_MSI] = 1;
   1986 			counts[PCI_INTR_TYPE_INTX] = 1;
   1987 			goto alloc_retry;
   1988 		}
   1989 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1990 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   1991 		error = wm_setup_legacy(sc);
   1992 		if (error) {
   1993 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1994 			    counts[PCI_INTR_TYPE_MSI]);
   1995 
   1996 			/* The next try is for INTx: Disable MSI */
   1997 			max_type = PCI_INTR_TYPE_INTX;
   1998 			counts[PCI_INTR_TYPE_INTX] = 1;
   1999 			goto alloc_retry;
   2000 		}
   2001 	} else {
   2002 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2003 		error = wm_setup_legacy(sc);
   2004 		if (error) {
   2005 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2006 			    counts[PCI_INTR_TYPE_INTX]);
   2007 			return;
   2008 		}
   2009 	}
   2010 
   2011 	/*
   2012 	 * Check the function ID (unit number of the chip).
   2013 	 */
   2014 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2015 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2016 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2017 	    || (sc->sc_type == WM_T_82580)
   2018 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2019 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2020 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2021 	else
   2022 		sc->sc_funcid = 0;
   2023 
   2024 	/*
   2025 	 * Determine a few things about the bus we're connected to.
   2026 	 */
   2027 	if (sc->sc_type < WM_T_82543) {
   2028 		/* We don't really know the bus characteristics here. */
   2029 		sc->sc_bus_speed = 33;
   2030 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2031 		/*
    2032 		 * CSA (Communication Streaming Architecture) is about as fast
    2033 		 * as a 32-bit 66MHz PCI bus.
   2034 		 */
   2035 		sc->sc_flags |= WM_F_CSA;
   2036 		sc->sc_bus_speed = 66;
   2037 		aprint_verbose_dev(sc->sc_dev,
   2038 		    "Communication Streaming Architecture\n");
   2039 		if (sc->sc_type == WM_T_82547) {
   2040 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2041 			callout_setfunc(&sc->sc_txfifo_ch,
   2042 			    wm_82547_txfifo_stall, sc);
   2043 			aprint_verbose_dev(sc->sc_dev,
   2044 			    "using 82547 Tx FIFO stall work-around\n");
   2045 		}
   2046 	} else if (sc->sc_type >= WM_T_82571) {
   2047 		sc->sc_flags |= WM_F_PCIE;
   2048 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2049 		    && (sc->sc_type != WM_T_ICH10)
   2050 		    && (sc->sc_type != WM_T_PCH)
   2051 		    && (sc->sc_type != WM_T_PCH2)
   2052 		    && (sc->sc_type != WM_T_PCH_LPT)
   2053 		    && (sc->sc_type != WM_T_PCH_SPT)
   2054 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2055 			/* ICH* and PCH* have no PCIe capability registers */
   2056 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2057 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2058 				NULL) == 0)
   2059 				aprint_error_dev(sc->sc_dev,
   2060 				    "unable to find PCIe capability\n");
   2061 		}
   2062 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2063 	} else {
   2064 		reg = CSR_READ(sc, WMREG_STATUS);
   2065 		if (reg & STATUS_BUS64)
   2066 			sc->sc_flags |= WM_F_BUS64;
   2067 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2068 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2069 
   2070 			sc->sc_flags |= WM_F_PCIX;
   2071 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2072 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2073 				aprint_error_dev(sc->sc_dev,
   2074 				    "unable to find PCIX capability\n");
   2075 			else if (sc->sc_type != WM_T_82545_3 &&
   2076 				 sc->sc_type != WM_T_82546_3) {
   2077 				/*
   2078 				 * Work around a problem caused by the BIOS
   2079 				 * setting the max memory read byte count
   2080 				 * incorrectly.
   2081 				 */
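				/*
				 * Both fields encode the byte count as
				 * a power of two: 512 << n bytes, so
				 * 0 means 512 and 3 means 4096.
				 */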
   2082 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2083 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2084 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2085 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2086 
   2087 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2088 				    PCIX_CMD_BYTECNT_SHIFT;
   2089 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2090 				    PCIX_STATUS_MAXB_SHIFT;
   2091 				if (bytecnt > maxb) {
   2092 					aprint_verbose_dev(sc->sc_dev,
   2093 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2094 					    512 << bytecnt, 512 << maxb);
   2095 					pcix_cmd = (pcix_cmd &
   2096 					    ~PCIX_CMD_BYTECNT_MASK) |
   2097 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2098 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2099 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2100 					    pcix_cmd);
   2101 				}
   2102 			}
   2103 		}
   2104 		/*
   2105 		 * The quad port adapter is special; it has a PCIX-PCIX
   2106 		 * bridge on the board, and can run the secondary bus at
   2107 		 * a higher speed.
   2108 		 */
   2109 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2110 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2111 								      : 66;
   2112 		} else if (sc->sc_flags & WM_F_PCIX) {
   2113 			switch (reg & STATUS_PCIXSPD_MASK) {
   2114 			case STATUS_PCIXSPD_50_66:
   2115 				sc->sc_bus_speed = 66;
   2116 				break;
   2117 			case STATUS_PCIXSPD_66_100:
   2118 				sc->sc_bus_speed = 100;
   2119 				break;
   2120 			case STATUS_PCIXSPD_100_133:
   2121 				sc->sc_bus_speed = 133;
   2122 				break;
   2123 			default:
   2124 				aprint_error_dev(sc->sc_dev,
   2125 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2126 				    reg & STATUS_PCIXSPD_MASK);
   2127 				sc->sc_bus_speed = 66;
   2128 				break;
   2129 			}
   2130 		} else
   2131 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2132 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2133 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2134 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2135 	}
   2136 
   2137 	/* clear interesting stat counters */
   2138 	CSR_READ(sc, WMREG_COLC);
   2139 	CSR_READ(sc, WMREG_RXERRC);
   2140 
   2141 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2142 	    || (sc->sc_type >= WM_T_ICH8))
   2143 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2144 	if (sc->sc_type >= WM_T_ICH8)
   2145 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2146 
   2147 	/* Set PHY, NVM mutex related stuff */
   2148 	switch (sc->sc_type) {
   2149 	case WM_T_82542_2_0:
   2150 	case WM_T_82542_2_1:
   2151 	case WM_T_82543:
   2152 	case WM_T_82544:
   2153 		/* Microwire */
   2154 		sc->nvm.read = wm_nvm_read_uwire;
   2155 		sc->sc_nvm_wordsize = 64;
   2156 		sc->sc_nvm_addrbits = 6;
   2157 		break;
   2158 	case WM_T_82540:
   2159 	case WM_T_82545:
   2160 	case WM_T_82545_3:
   2161 	case WM_T_82546:
   2162 	case WM_T_82546_3:
   2163 		/* Microwire */
   2164 		sc->nvm.read = wm_nvm_read_uwire;
   2165 		reg = CSR_READ(sc, WMREG_EECD);
   2166 		if (reg & EECD_EE_SIZE) {
   2167 			sc->sc_nvm_wordsize = 256;
   2168 			sc->sc_nvm_addrbits = 8;
   2169 		} else {
   2170 			sc->sc_nvm_wordsize = 64;
   2171 			sc->sc_nvm_addrbits = 6;
   2172 		}
   2173 		sc->sc_flags |= WM_F_LOCK_EECD;
   2174 		sc->nvm.acquire = wm_get_eecd;
   2175 		sc->nvm.release = wm_put_eecd;
   2176 		break;
   2177 	case WM_T_82541:
   2178 	case WM_T_82541_2:
   2179 	case WM_T_82547:
   2180 	case WM_T_82547_2:
   2181 		reg = CSR_READ(sc, WMREG_EECD);
   2182 		/*
    2183 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI bus only on
    2184 		 * the 8254[17], so set the flags and functions before calling it.
   2185 		 */
   2186 		sc->sc_flags |= WM_F_LOCK_EECD;
   2187 		sc->nvm.acquire = wm_get_eecd;
   2188 		sc->nvm.release = wm_put_eecd;
   2189 		if (reg & EECD_EE_TYPE) {
   2190 			/* SPI */
   2191 			sc->nvm.read = wm_nvm_read_spi;
   2192 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2193 			wm_nvm_set_addrbits_size_eecd(sc);
   2194 		} else {
   2195 			/* Microwire */
   2196 			sc->nvm.read = wm_nvm_read_uwire;
   2197 			if ((reg & EECD_EE_ABITS) != 0) {
   2198 				sc->sc_nvm_wordsize = 256;
   2199 				sc->sc_nvm_addrbits = 8;
   2200 			} else {
   2201 				sc->sc_nvm_wordsize = 64;
   2202 				sc->sc_nvm_addrbits = 6;
   2203 			}
   2204 		}
   2205 		break;
   2206 	case WM_T_82571:
   2207 	case WM_T_82572:
   2208 		/* SPI */
   2209 		sc->nvm.read = wm_nvm_read_eerd;
    2210 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2211 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2212 		wm_nvm_set_addrbits_size_eecd(sc);
   2213 		sc->phy.acquire = wm_get_swsm_semaphore;
   2214 		sc->phy.release = wm_put_swsm_semaphore;
   2215 		sc->nvm.acquire = wm_get_nvm_82571;
   2216 		sc->nvm.release = wm_put_nvm_82571;
   2217 		break;
   2218 	case WM_T_82573:
   2219 	case WM_T_82574:
   2220 	case WM_T_82583:
   2221 		sc->nvm.read = wm_nvm_read_eerd;
    2222 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2223 		if (sc->sc_type == WM_T_82573) {
   2224 			sc->phy.acquire = wm_get_swsm_semaphore;
   2225 			sc->phy.release = wm_put_swsm_semaphore;
   2226 			sc->nvm.acquire = wm_get_nvm_82571;
   2227 			sc->nvm.release = wm_put_nvm_82571;
   2228 		} else {
   2229 			/* Both PHY and NVM use the same semaphore. */
   2230 			sc->phy.acquire = sc->nvm.acquire
   2231 			    = wm_get_swfwhw_semaphore;
   2232 			sc->phy.release = sc->nvm.release
   2233 			    = wm_put_swfwhw_semaphore;
   2234 		}
   2235 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2236 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2237 			sc->sc_nvm_wordsize = 2048;
   2238 		} else {
   2239 			/* SPI */
   2240 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2241 			wm_nvm_set_addrbits_size_eecd(sc);
   2242 		}
   2243 		break;
   2244 	case WM_T_82575:
   2245 	case WM_T_82576:
   2246 	case WM_T_82580:
   2247 	case WM_T_I350:
   2248 	case WM_T_I354:
   2249 	case WM_T_80003:
   2250 		/* SPI */
   2251 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2252 		wm_nvm_set_addrbits_size_eecd(sc);
   2253 		if ((sc->sc_type == WM_T_80003)
   2254 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2255 			sc->nvm.read = wm_nvm_read_eerd;
   2256 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2257 		} else {
   2258 			sc->nvm.read = wm_nvm_read_spi;
   2259 			sc->sc_flags |= WM_F_LOCK_EECD;
   2260 		}
   2261 		sc->phy.acquire = wm_get_phy_82575;
   2262 		sc->phy.release = wm_put_phy_82575;
   2263 		sc->nvm.acquire = wm_get_nvm_80003;
   2264 		sc->nvm.release = wm_put_nvm_80003;
   2265 		break;
   2266 	case WM_T_ICH8:
   2267 	case WM_T_ICH9:
   2268 	case WM_T_ICH10:
   2269 	case WM_T_PCH:
   2270 	case WM_T_PCH2:
   2271 	case WM_T_PCH_LPT:
   2272 		sc->nvm.read = wm_nvm_read_ich8;
   2273 		/* FLASH */
   2274 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2275 		sc->sc_nvm_wordsize = 2048;
   2276 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2277 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2278 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2279 			aprint_error_dev(sc->sc_dev,
   2280 			    "can't map FLASH registers\n");
   2281 			goto out;
   2282 		}
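		/*
		 * GFPREG holds the first and last allowed flash sectors
		 * in its low and high 16 bits.  Below they are turned
		 * into a base offset in bytes and a per-bank size in
		 * 16-bit words (the flash holds two banks).
		 */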
   2283 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2284 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2285 		    ICH_FLASH_SECTOR_SIZE;
   2286 		sc->sc_ich8_flash_bank_size =
   2287 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2288 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2289 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2290 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2291 		sc->sc_flashreg_offset = 0;
   2292 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2293 		sc->phy.release = wm_put_swflag_ich8lan;
   2294 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2295 		sc->nvm.release = wm_put_nvm_ich8lan;
   2296 		break;
   2297 	case WM_T_PCH_SPT:
   2298 	case WM_T_PCH_CNP:
   2299 		sc->nvm.read = wm_nvm_read_spt;
   2300 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2301 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2302 		sc->sc_flasht = sc->sc_st;
   2303 		sc->sc_flashh = sc->sc_sh;
   2304 		sc->sc_ich8_flash_base = 0;
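		/*
		 * The NVM size comes from bits 5:1 of the STRAP
		 * register: (field + 1) * NVM_SIZE_MULTIPLIER bytes.
		 */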
   2305 		sc->sc_nvm_wordsize =
   2306 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2307 		    * NVM_SIZE_MULTIPLIER;
    2308 		/* That is the size in bytes; we want it in words */
   2309 		sc->sc_nvm_wordsize /= 2;
   2310 		/* Assume 2 banks */
   2311 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2312 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2313 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2314 		sc->phy.release = wm_put_swflag_ich8lan;
   2315 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2316 		sc->nvm.release = wm_put_nvm_ich8lan;
   2317 		break;
   2318 	case WM_T_I210:
   2319 	case WM_T_I211:
    2320 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2321 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2322 		if (wm_nvm_flash_presence_i210(sc)) {
   2323 			sc->nvm.read = wm_nvm_read_eerd;
   2324 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2325 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2326 			wm_nvm_set_addrbits_size_eecd(sc);
   2327 		} else {
   2328 			sc->nvm.read = wm_nvm_read_invm;
   2329 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2330 			sc->sc_nvm_wordsize = INVM_SIZE;
   2331 		}
   2332 		sc->phy.acquire = wm_get_phy_82575;
   2333 		sc->phy.release = wm_put_phy_82575;
   2334 		sc->nvm.acquire = wm_get_nvm_80003;
   2335 		sc->nvm.release = wm_put_nvm_80003;
   2336 		break;
   2337 	default:
   2338 		break;
   2339 	}
   2340 
   2341 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2342 	switch (sc->sc_type) {
   2343 	case WM_T_82571:
   2344 	case WM_T_82572:
   2345 		reg = CSR_READ(sc, WMREG_SWSM2);
   2346 		if ((reg & SWSM2_LOCK) == 0) {
   2347 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2348 			force_clear_smbi = true;
   2349 		} else
   2350 			force_clear_smbi = false;
   2351 		break;
   2352 	case WM_T_82573:
   2353 	case WM_T_82574:
   2354 	case WM_T_82583:
   2355 		force_clear_smbi = true;
   2356 		break;
   2357 	default:
   2358 		force_clear_smbi = false;
   2359 		break;
   2360 	}
   2361 	if (force_clear_smbi) {
   2362 		reg = CSR_READ(sc, WMREG_SWSM);
   2363 		if ((reg & SWSM_SMBI) != 0)
   2364 			aprint_error_dev(sc->sc_dev,
   2365 			    "Please update the Bootagent\n");
   2366 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2367 	}
   2368 
   2369 	/*
    2370 	 * Defer printing the EEPROM type until after verifying the checksum.
   2371 	 * This allows the EEPROM type to be printed correctly in the case
   2372 	 * that no EEPROM is attached.
   2373 	 */
   2374 	/*
   2375 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2376 	 * this for later, so we can fail future reads from the EEPROM.
   2377 	 */
   2378 	if (wm_nvm_validate_checksum(sc)) {
   2379 		/*
    2380 		 * Validate again, because some PCI-e parts fail the
    2381 		 * first check due to the link being in a sleep state.
   2382 		 */
   2383 		if (wm_nvm_validate_checksum(sc))
   2384 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2385 	}
   2386 
   2387 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2388 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2389 	else {
   2390 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2391 		    sc->sc_nvm_wordsize);
   2392 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2393 			aprint_verbose("iNVM");
   2394 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2395 			aprint_verbose("FLASH(HW)");
   2396 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2397 			aprint_verbose("FLASH");
   2398 		else {
   2399 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2400 				eetype = "SPI";
   2401 			else
   2402 				eetype = "MicroWire";
   2403 			aprint_verbose("(%d address bits) %s EEPROM",
   2404 			    sc->sc_nvm_addrbits, eetype);
   2405 		}
   2406 	}
   2407 	wm_nvm_version(sc);
   2408 	aprint_verbose("\n");
   2409 
   2410 	/*
    2411 	 * XXX This is the first call of wm_gmii_setup_phytype();
    2412 	 * the result might be incorrect.
   2413 	 */
   2414 	wm_gmii_setup_phytype(sc, 0, 0);
   2415 
   2416 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2417 	switch (sc->sc_type) {
   2418 	case WM_T_ICH8:
   2419 	case WM_T_ICH9:
   2420 	case WM_T_ICH10:
   2421 	case WM_T_PCH:
   2422 	case WM_T_PCH2:
   2423 	case WM_T_PCH_LPT:
   2424 	case WM_T_PCH_SPT:
   2425 	case WM_T_PCH_CNP:
   2426 		apme_mask = WUC_APME;
   2427 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2428 		if ((eeprom_data & apme_mask) != 0)
   2429 			sc->sc_flags |= WM_F_WOL;
   2430 		break;
   2431 	default:
   2432 		break;
   2433 	}
   2434 
   2435 	/* Reset the chip to a known state. */
   2436 	wm_reset(sc);
   2437 
   2438 	/*
   2439 	 * Check for I21[01] PLL workaround.
   2440 	 *
   2441 	 * Three cases:
   2442 	 * a) Chip is I211.
   2443 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2444 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2445 	 */
   2446 	if (sc->sc_type == WM_T_I211)
   2447 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2448 	if (sc->sc_type == WM_T_I210) {
   2449 		if (!wm_nvm_flash_presence_i210(sc))
   2450 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2451 		else if ((sc->sc_nvm_ver_major < 3)
   2452 		    || ((sc->sc_nvm_ver_major == 3)
   2453 			&& (sc->sc_nvm_ver_minor < 25))) {
   2454 			aprint_verbose_dev(sc->sc_dev,
   2455 			    "ROM image version %d.%d is older than 3.25\n",
   2456 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2457 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2458 		}
   2459 	}
   2460 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2461 		wm_pll_workaround_i210(sc);
   2462 
   2463 	wm_get_wakeup(sc);
   2464 
   2465 	/* Non-AMT based hardware can now take control from firmware */
   2466 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2467 		wm_get_hw_control(sc);
   2468 
   2469 	/*
   2470 	 * Read the Ethernet address from the EEPROM, if not first found
   2471 	 * in device properties.
   2472 	 */
   2473 	ea = prop_dictionary_get(dict, "mac-address");
   2474 	if (ea != NULL) {
   2475 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2476 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2477 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2478 	} else {
   2479 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2480 			aprint_error_dev(sc->sc_dev,
   2481 			    "unable to read Ethernet address\n");
   2482 			goto out;
   2483 		}
   2484 	}
   2485 
   2486 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2487 	    ether_sprintf(enaddr));
   2488 
   2489 	/*
   2490 	 * Read the config info from the EEPROM, and set up various
   2491 	 * bits in the control registers based on their contents.
   2492 	 */
   2493 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2494 	if (pn != NULL) {
   2495 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2496 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2497 	} else {
   2498 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2499 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2500 			goto out;
   2501 		}
   2502 	}
   2503 
   2504 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2505 	if (pn != NULL) {
   2506 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2507 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2508 	} else {
   2509 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2510 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2511 			goto out;
   2512 		}
   2513 	}
   2514 
   2515 	/* check for WM_F_WOL */
   2516 	switch (sc->sc_type) {
   2517 	case WM_T_82542_2_0:
   2518 	case WM_T_82542_2_1:
   2519 	case WM_T_82543:
   2520 		/* dummy? */
   2521 		eeprom_data = 0;
   2522 		apme_mask = NVM_CFG3_APME;
   2523 		break;
   2524 	case WM_T_82544:
   2525 		apme_mask = NVM_CFG2_82544_APM_EN;
   2526 		eeprom_data = cfg2;
   2527 		break;
   2528 	case WM_T_82546:
   2529 	case WM_T_82546_3:
   2530 	case WM_T_82571:
   2531 	case WM_T_82572:
   2532 	case WM_T_82573:
   2533 	case WM_T_82574:
   2534 	case WM_T_82583:
   2535 	case WM_T_80003:
   2536 	case WM_T_82575:
   2537 	case WM_T_82576:
   2538 		apme_mask = NVM_CFG3_APME;
   2539 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2540 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2541 		break;
   2542 	case WM_T_82580:
   2543 	case WM_T_I350:
   2544 	case WM_T_I354:
   2545 	case WM_T_I210:
   2546 	case WM_T_I211:
   2547 		apme_mask = NVM_CFG3_APME;
   2548 		wm_nvm_read(sc,
   2549 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2550 		    1, &eeprom_data);
   2551 		break;
   2552 	case WM_T_ICH8:
   2553 	case WM_T_ICH9:
   2554 	case WM_T_ICH10:
   2555 	case WM_T_PCH:
   2556 	case WM_T_PCH2:
   2557 	case WM_T_PCH_LPT:
   2558 	case WM_T_PCH_SPT:
   2559 	case WM_T_PCH_CNP:
    2560 		/* Already checked before wm_reset() */
   2561 		apme_mask = eeprom_data = 0;
   2562 		break;
   2563 	default: /* XXX 82540 */
   2564 		apme_mask = NVM_CFG3_APME;
   2565 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2566 		break;
   2567 	}
    2568 	/* Set the WM_F_WOL flag based on the EEPROM bits read above */
   2569 	if ((eeprom_data & apme_mask) != 0)
   2570 		sc->sc_flags |= WM_F_WOL;
   2571 
   2572 	/*
    2573 	 * We have the EEPROM settings; now apply the special cases
    2574 	 * where the EEPROM may be wrong or where the board doesn't
    2575 	 * support wake-on-LAN on a particular port.
   2576 	 */
   2577 	switch (sc->sc_pcidevid) {
   2578 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2579 		sc->sc_flags &= ~WM_F_WOL;
   2580 		break;
   2581 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2582 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2583 		/* Wake events only supported on port A for dual fiber
   2584 		 * regardless of eeprom setting */
   2585 		if (sc->sc_funcid == 1)
   2586 			sc->sc_flags &= ~WM_F_WOL;
   2587 		break;
   2588 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2589 		/* If quad port adapter, disable WoL on all but port A */
   2590 		if (sc->sc_funcid != 0)
   2591 			sc->sc_flags &= ~WM_F_WOL;
   2592 		break;
   2593 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2594 		/* Wake events only supported on port A for dual fiber
   2595 		 * regardless of eeprom setting */
   2596 		if (sc->sc_funcid == 1)
   2597 			sc->sc_flags &= ~WM_F_WOL;
   2598 		break;
   2599 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2600 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2601 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2602 		/* If quad port adapter, disable WoL on all but port A */
   2603 		if (sc->sc_funcid != 0)
   2604 			sc->sc_flags &= ~WM_F_WOL;
   2605 		break;
   2606 	}
   2607 
   2608 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2609 		/* Check NVM for autonegotiation */
   2610 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2611 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2612 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2613 		}
   2614 	}
   2615 
   2616 	/*
    2617 	 * XXX Some multi-port cards need special handling to
    2618 	 * disable a particular port.
   2619 	 */
   2620 
   2621 	if (sc->sc_type >= WM_T_82544) {
   2622 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2623 		if (pn != NULL) {
   2624 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2625 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2626 		} else {
   2627 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2628 				aprint_error_dev(sc->sc_dev,
   2629 				    "unable to read SWDPIN\n");
   2630 				goto out;
   2631 			}
   2632 		}
   2633 	}
   2634 
   2635 	if (cfg1 & NVM_CFG1_ILOS)
   2636 		sc->sc_ctrl |= CTRL_ILOS;
   2637 
   2638 	/*
   2639 	 * XXX
    2640 	 * This code isn't correct because pins 2 and 3 are at
    2641 	 * different positions on newer chips.  Check all datasheets.
    2642 	 *
    2643 	 * Until this is resolved, apply it only to chips <= 82580.
   2644 	 */
   2645 	if (sc->sc_type <= WM_T_82580) {
   2646 		if (sc->sc_type >= WM_T_82544) {
   2647 			sc->sc_ctrl |=
   2648 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2649 			    CTRL_SWDPIO_SHIFT;
   2650 			sc->sc_ctrl |=
   2651 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2652 			    CTRL_SWDPINS_SHIFT;
   2653 		} else {
   2654 			sc->sc_ctrl |=
   2655 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2656 			    CTRL_SWDPIO_SHIFT;
   2657 		}
   2658 	}
   2659 
   2660 	/* XXX For other than 82580? */
   2661 	if (sc->sc_type == WM_T_82580) {
   2662 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2663 		if (nvmword & __BIT(13))
   2664 			sc->sc_ctrl |= CTRL_ILOS;
   2665 	}
   2666 
   2667 #if 0
   2668 	if (sc->sc_type >= WM_T_82544) {
   2669 		if (cfg1 & NVM_CFG1_IPS0)
   2670 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2671 		if (cfg1 & NVM_CFG1_IPS1)
   2672 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2673 		sc->sc_ctrl_ext |=
   2674 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2675 		    CTRL_EXT_SWDPIO_SHIFT;
   2676 		sc->sc_ctrl_ext |=
   2677 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2678 		    CTRL_EXT_SWDPINS_SHIFT;
   2679 	} else {
   2680 		sc->sc_ctrl_ext |=
   2681 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2682 		    CTRL_EXT_SWDPIO_SHIFT;
   2683 	}
   2684 #endif
   2685 
   2686 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2687 #if 0
   2688 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2689 #endif
   2690 
   2691 	if (sc->sc_type == WM_T_PCH) {
   2692 		uint16_t val;
   2693 
   2694 		/* Save the NVM K1 bit setting */
   2695 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2696 
   2697 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2698 			sc->sc_nvm_k1_enabled = 1;
   2699 		else
   2700 			sc->sc_nvm_k1_enabled = 0;
   2701 	}
   2702 
    2703 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2704 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2705 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2706 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2707 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2708 	    || sc->sc_type == WM_T_82573
   2709 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2710 		/* Copper only */
   2711 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2712 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2713 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2714 	    || (sc->sc_type == WM_T_I211)) {
   2715 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2716 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2717 		switch (link_mode) {
   2718 		case CTRL_EXT_LINK_MODE_1000KX:
   2719 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2720 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2721 			break;
   2722 		case CTRL_EXT_LINK_MODE_SGMII:
   2723 			if (wm_sgmii_uses_mdio(sc)) {
   2724 				aprint_verbose_dev(sc->sc_dev,
   2725 				    "SGMII(MDIO)\n");
   2726 				sc->sc_flags |= WM_F_SGMII;
   2727 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2728 				break;
   2729 			}
   2730 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2731 			/*FALLTHROUGH*/
   2732 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2733 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2734 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2735 				if (link_mode
   2736 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2737 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2738 					sc->sc_flags |= WM_F_SGMII;
   2739 				} else {
   2740 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2741 					aprint_verbose_dev(sc->sc_dev,
   2742 					    "SERDES\n");
   2743 				}
   2744 				break;
   2745 			}
   2746 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2747 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2748 
   2749 			/* Change current link mode setting */
   2750 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2751 			switch (sc->sc_mediatype) {
   2752 			case WM_MEDIATYPE_COPPER:
   2753 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2754 				break;
   2755 			case WM_MEDIATYPE_SERDES:
   2756 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2757 				break;
   2758 			default:
   2759 				break;
   2760 			}
   2761 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2762 			break;
   2763 		case CTRL_EXT_LINK_MODE_GMII:
   2764 		default:
   2765 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2766 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2767 			break;
   2768 		}
   2769 
    2771 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2772 			reg |= CTRL_EXT_I2C_ENA;
    2773 		else
    2774 			reg &= ~CTRL_EXT_I2C_ENA;
   2775 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2776 	} else if (sc->sc_type < WM_T_82543 ||
   2777 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2778 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2779 			aprint_error_dev(sc->sc_dev,
   2780 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2781 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2782 		}
   2783 	} else {
   2784 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2785 			aprint_error_dev(sc->sc_dev,
   2786 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2787 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2788 		}
   2789 	}
   2790 
   2791 	if (sc->sc_type >= WM_T_PCH2)
   2792 		sc->sc_flags |= WM_F_EEE;
   2793 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2794 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2795 		/* XXX: Need special handling for I354. (not yet) */
   2796 		if (sc->sc_type != WM_T_I354)
   2797 			sc->sc_flags |= WM_F_EEE;
   2798 	}
   2799 
   2800 	/* Set device properties (macflags) */
   2801 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2802 
   2803 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2804 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
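	/*
	 * (snprintb(3) expands the flag word using the WM_FLAGS bit
	 * description; for example, a value of 0x3 with a new-style
	 * format "\177\020b\0F0\0b\1F1\0" renders as "0x3<F0,F1>".)
	 */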
   2805 
   2806 	/* Initialize the media structures accordingly. */
   2807 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2808 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2809 	else
   2810 		wm_tbi_mediainit(sc); /* All others */
   2811 
   2812 	ifp = &sc->sc_ethercom.ec_if;
   2813 	xname = device_xname(sc->sc_dev);
   2814 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2815 	ifp->if_softc = sc;
   2816 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2817 #ifdef WM_MPSAFE
   2818 	ifp->if_extflags = IFEF_MPSAFE;
   2819 #endif
   2820 	ifp->if_ioctl = wm_ioctl;
   2821 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2822 		ifp->if_start = wm_nq_start;
    2823 		/*
    2824 		 * When the number of CPUs is one and the controller can use
    2825 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2826 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2827 		 * other for link status changes.
    2828 		 * In this situation, wm_nq_transmit() is disadvantageous
    2829 		 * because of the wm_select_txqueue() and pcq(9) overhead.
    2830 		 */
   2831 		if (wm_is_using_multiqueue(sc))
   2832 			ifp->if_transmit = wm_nq_transmit;
   2833 	} else {
   2834 		ifp->if_start = wm_start;
    2835 		/*
    2836 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
    2837 		 */
   2838 		if (wm_is_using_multiqueue(sc))
   2839 			ifp->if_transmit = wm_transmit;
   2840 	}
    2841 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2842 	ifp->if_init = wm_init;
   2843 	ifp->if_stop = wm_stop;
   2844 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2845 	IFQ_SET_READY(&ifp->if_snd);
   2846 
   2847 	/* Check for jumbo frame */
   2848 	switch (sc->sc_type) {
   2849 	case WM_T_82573:
   2850 		/* XXX limited to 9234 if ASPM is disabled */
   2851 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2852 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2853 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2854 		break;
   2855 	case WM_T_82571:
   2856 	case WM_T_82572:
   2857 	case WM_T_82574:
   2858 	case WM_T_82583:
   2859 	case WM_T_82575:
   2860 	case WM_T_82576:
   2861 	case WM_T_82580:
   2862 	case WM_T_I350:
   2863 	case WM_T_I354:
   2864 	case WM_T_I210:
   2865 	case WM_T_I211:
   2866 	case WM_T_80003:
   2867 	case WM_T_ICH9:
   2868 	case WM_T_ICH10:
   2869 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2870 	case WM_T_PCH_LPT:
   2871 	case WM_T_PCH_SPT:
   2872 	case WM_T_PCH_CNP:
   2873 		/* XXX limited to 9234 */
   2874 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2875 		break;
   2876 	case WM_T_PCH:
   2877 		/* XXX limited to 4096 */
   2878 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2879 		break;
   2880 	case WM_T_82542_2_0:
   2881 	case WM_T_82542_2_1:
   2882 	case WM_T_ICH8:
   2883 		/* No support for jumbo frame */
   2884 		break;
   2885 	default:
   2886 		/* ETHER_MAX_LEN_JUMBO */
   2887 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2888 		break;
   2889 	}
   2890 
    2891 	/* If we're an i82543 or greater, we can support VLANs. */
   2892 	if (sc->sc_type >= WM_T_82543)
   2893 		sc->sc_ethercom.ec_capabilities |=
   2894 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2895 
   2896 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2897 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2898 
    2899 	/*
    2900 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
    2901 	 * on i82543 and later.
    2902 	 */
   2903 	if (sc->sc_type >= WM_T_82543) {
   2904 		ifp->if_capabilities |=
   2905 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2906 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2907 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2908 		    IFCAP_CSUM_TCPv6_Tx |
   2909 		    IFCAP_CSUM_UDPv6_Tx;
   2910 	}
   2911 
   2912 	/*
   2913 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2914 	 *
   2915 	 *	82541GI (8086:1076) ... no
   2916 	 *	82572EI (8086:10b9) ... yes
   2917 	 */
   2918 	if (sc->sc_type >= WM_T_82571) {
   2919 		ifp->if_capabilities |=
   2920 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2921 	}
   2922 
   2923 	/*
    2924 	 * If we're an i82544 or greater (except the i82547), we can do
   2925 	 * TCP segmentation offload.
   2926 	 */
   2927 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2928 		ifp->if_capabilities |= IFCAP_TSOv4;
   2929 	}
   2930 
   2931 	if (sc->sc_type >= WM_T_82571) {
   2932 		ifp->if_capabilities |= IFCAP_TSOv6;
   2933 	}
   2934 
   2935 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2936 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2937 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2938 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2939 
   2940 #ifdef WM_MPSAFE
   2941 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2942 #else
   2943 	sc->sc_core_lock = NULL;
   2944 #endif
   2945 
   2946 	/* Attach the interface. */
   2947 	error = if_initialize(ifp);
   2948 	if (error != 0) {
   2949 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2950 		    error);
   2951 		return; /* Error */
   2952 	}
   2953 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2954 	ether_ifattach(ifp, enaddr);
   2955 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2956 	if_register(ifp);
   2957 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2958 	    RND_FLAG_DEFAULT);
   2959 
   2960 #ifdef WM_EVENT_COUNTERS
   2961 	/* Attach event counters. */
   2962 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2963 	    NULL, xname, "linkintr");
   2964 
   2965 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2966 	    NULL, xname, "tx_xoff");
   2967 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2968 	    NULL, xname, "tx_xon");
   2969 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2970 	    NULL, xname, "rx_xoff");
   2971 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2972 	    NULL, xname, "rx_xon");
   2973 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2974 	    NULL, xname, "rx_macctl");
   2975 #endif /* WM_EVENT_COUNTERS */
   2976 
   2977 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2978 		pmf_class_network_register(self, ifp);
   2979 	else
   2980 		aprint_error_dev(self, "couldn't establish power handler\n");
   2981 
   2982 	sc->sc_flags |= WM_F_ATTACHED;
   2983 out:
   2984 	return;
   2985 }
   2986 
   2987 /* The detach function (ca_detach) */
   2988 static int
   2989 wm_detach(device_t self, int flags __unused)
   2990 {
   2991 	struct wm_softc *sc = device_private(self);
   2992 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2993 	int i;
   2994 
   2995 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2996 		return 0;
   2997 
   2998 	/* Stop the interface. Callouts are stopped in it. */
   2999 	wm_stop(ifp, 1);
   3000 
   3001 	pmf_device_deregister(self);
   3002 
   3003 #ifdef WM_EVENT_COUNTERS
   3004 	evcnt_detach(&sc->sc_ev_linkintr);
   3005 
   3006 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3007 	evcnt_detach(&sc->sc_ev_tx_xon);
   3008 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3009 	evcnt_detach(&sc->sc_ev_rx_xon);
   3010 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3011 #endif /* WM_EVENT_COUNTERS */
   3012 
   3013 	rnd_detach_source(&sc->rnd_source);
   3014 
   3015 	/* Tell the firmware about the release */
   3016 	WM_CORE_LOCK(sc);
   3017 	wm_release_manageability(sc);
   3018 	wm_release_hw_control(sc);
   3019 	wm_enable_wakeup(sc);
   3020 	WM_CORE_UNLOCK(sc);
   3021 
   3022 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3023 
   3024 	/* Delete all remaining media. */
   3025 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3026 
   3027 	ether_ifdetach(ifp);
   3028 	if_detach(ifp);
   3029 	if_percpuq_destroy(sc->sc_ipq);
   3030 
   3031 	/* Unload RX dmamaps and free mbufs */
   3032 	for (i = 0; i < sc->sc_nqueues; i++) {
   3033 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3034 		mutex_enter(rxq->rxq_lock);
   3035 		wm_rxdrain(rxq);
   3036 		mutex_exit(rxq->rxq_lock);
   3037 	}
   3038 	/* Must unlock here */
   3039 
   3040 	/* Disestablish the interrupt handler */
   3041 	for (i = 0; i < sc->sc_nintrs; i++) {
   3042 		if (sc->sc_ihs[i] != NULL) {
   3043 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3044 			sc->sc_ihs[i] = NULL;
   3045 		}
   3046 	}
   3047 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3048 
   3049 	wm_free_txrx_queues(sc);
   3050 
   3051 	/* Unmap the registers */
   3052 	if (sc->sc_ss) {
   3053 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3054 		sc->sc_ss = 0;
   3055 	}
   3056 	if (sc->sc_ios) {
   3057 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3058 		sc->sc_ios = 0;
   3059 	}
   3060 	if (sc->sc_flashs) {
   3061 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3062 		sc->sc_flashs = 0;
   3063 	}
   3064 
   3065 	if (sc->sc_core_lock)
   3066 		mutex_obj_free(sc->sc_core_lock);
   3067 	if (sc->sc_ich_phymtx)
   3068 		mutex_obj_free(sc->sc_ich_phymtx);
   3069 	if (sc->sc_ich_nvmmtx)
   3070 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3071 
   3072 	return 0;
   3073 }
   3074 
   3075 static bool
   3076 wm_suspend(device_t self, const pmf_qual_t *qual)
   3077 {
   3078 	struct wm_softc *sc = device_private(self);
   3079 
   3080 	wm_release_manageability(sc);
   3081 	wm_release_hw_control(sc);
   3082 	wm_enable_wakeup(sc);
   3083 
   3084 	return true;
   3085 }
   3086 
   3087 static bool
   3088 wm_resume(device_t self, const pmf_qual_t *qual)
   3089 {
   3090 	struct wm_softc *sc = device_private(self);
   3091 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3092 	pcireg_t reg;
   3093 	char buf[256];
   3094 
   3095 	reg = CSR_READ(sc, WMREG_WUS);
   3096 	if (reg != 0) {
   3097 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3098 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3099 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3100 	}
   3101 
   3102 	if (sc->sc_type >= WM_T_PCH2)
   3103 		wm_resume_workarounds_pchlan(sc);
   3104 	if ((ifp->if_flags & IFF_UP) == 0) {
   3105 		wm_reset(sc);
   3106 		/* Non-AMT based hardware can now take control from firmware */
   3107 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3108 			wm_get_hw_control(sc);
   3109 		wm_init_manageability(sc);
   3110 	} else {
   3111 		/*
   3112 		 * We called pmf_class_network_register(), so if_init() is
   3113 		 * automatically called when IFF_UP. wm_reset(),
   3114 		 * wm_get_hw_control() and wm_init_manageability() are called
   3115 		 * via wm_init().
   3116 		 */
   3117 	}
   3118 
   3119 	return true;
   3120 }
   3121 
   3122 /*
   3123  * wm_watchdog:		[ifnet interface function]
   3124  *
   3125  *	Watchdog timer handler.
   3126  */
   3127 static void
   3128 wm_watchdog(struct ifnet *ifp)
   3129 {
   3130 	int qid;
   3131 	struct wm_softc *sc = ifp->if_softc;
    3132 	uint16_t hang_queue = 0; /* Max number of wm(4) queues is 16 (82576). */
   3133 
   3134 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3135 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3136 
   3137 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3138 	}
   3139 
    3140 	/* If any queue hung up, reset the interface. */
   3141 	if (hang_queue != 0) {
   3142 		(void)wm_init(ifp);
   3143 
    3144 		/*
    3145 		 * There is still some upper-layer processing that calls
    3146 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
    3147 		 */
   3148 		/* Try to get more packets going. */
   3149 		ifp->if_start(ifp);
   3150 	}
   3151 }
   3152 
   3153 
   3154 static void
   3155 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3156 {
   3157 
   3158 	mutex_enter(txq->txq_lock);
   3159 	if (txq->txq_sending &&
   3160 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3161 		wm_watchdog_txq_locked(ifp, txq, hang);
   3162 
   3163 	mutex_exit(txq->txq_lock);
   3164 }
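
/*
 * The hang check above only proceeds while txq_sending is set and
 * more than wm_watchdog_timeout seconds of time_uptime have elapsed
 * since txq_lastsent was last updated.
 */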
   3165 
   3166 static void
   3167 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3168     uint16_t *hang)
   3169 {
   3170 	struct wm_softc *sc = ifp->if_softc;
   3171 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3172 
   3173 	KASSERT(mutex_owned(txq->txq_lock));
   3174 
   3175 	/*
   3176 	 * Since we're using delayed interrupts, sweep up
   3177 	 * before we report an error.
   3178 	 */
   3179 	wm_txeof(txq, UINT_MAX);
   3180 
   3181 	if (txq->txq_sending)
   3182 		*hang |= __BIT(wmq->wmq_id);
   3183 
   3184 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3185 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3186 		    device_xname(sc->sc_dev));
   3187 	} else {
   3188 #ifdef WM_DEBUG
   3189 		int i, j;
   3190 		struct wm_txsoft *txs;
   3191 #endif
   3192 		log(LOG_ERR,
   3193 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3194 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3195 		    txq->txq_next);
   3196 		ifp->if_oerrors++;
   3197 #ifdef WM_DEBUG
   3198 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3199 		    i = WM_NEXTTXS(txq, i)) {
   3200 			txs = &txq->txq_soft[i];
   3201 			printf("txs %d tx %d -> %d\n",
   3202 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3203 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3204 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3205 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3206 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3207 					printf("\t %#08x%08x\n",
   3208 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3209 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3210 				} else {
   3211 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3212 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3213 					    txq->txq_descs[j].wtx_addr.wa_low);
   3214 					printf("\t %#04x%02x%02x%08x\n",
   3215 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3216 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3217 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3218 					    txq->txq_descs[j].wtx_cmdlen);
   3219 				}
   3220 				if (j == txs->txs_lastdesc)
   3221 					break;
   3222 			}
   3223 		}
   3224 #endif
   3225 	}
   3226 }
   3227 
   3228 /*
   3229  * wm_tick:
   3230  *
   3231  *	One second timer, used to check link status, sweep up
   3232  *	completed transmit jobs, etc.
   3233  */
   3234 static void
   3235 wm_tick(void *arg)
   3236 {
   3237 	struct wm_softc *sc = arg;
   3238 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3239 #ifndef WM_MPSAFE
   3240 	int s = splnet();
   3241 #endif
   3242 
   3243 	WM_CORE_LOCK(sc);
   3244 
   3245 	if (sc->sc_core_stopping) {
   3246 		WM_CORE_UNLOCK(sc);
   3247 #ifndef WM_MPSAFE
   3248 		splx(s);
   3249 #endif
   3250 		return;
   3251 	}
   3252 
   3253 	if (sc->sc_type >= WM_T_82542_2_1) {
   3254 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3255 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3256 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3257 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3258 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3259 	}
   3260 
   3261 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3262 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3263 	    + CSR_READ(sc, WMREG_CRCERRS)
   3264 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3265 	    + CSR_READ(sc, WMREG_SYMERRC)
   3266 	    + CSR_READ(sc, WMREG_RXERRC)
   3267 	    + CSR_READ(sc, WMREG_SEC)
   3268 	    + CSR_READ(sc, WMREG_CEXTERR)
   3269 	    + CSR_READ(sc, WMREG_RLEC);
    3270 	/*
    3271 	 * WMREG_RNBC is incremented when no receive buffers are available
    3272 	 * in host memory. It is not the number of dropped packets, because
    3273 	 * the Ethernet controller can still receive packets in that case
    3274 	 * as long as there is space in the PHY's FIFO.
    3275 	 *
    3276 	 * If you want to count WMREG_RNBC events, use your own EVCNT
    3277 	 * instead of if_iqdrops.
    3278 	 */
   3279 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
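#if 0
	/*
	 * Illustrative sketch only (not compiled): count RNBC with a
	 * dedicated event counter, as the comment above suggests.
	 * "sc_ev_rx_nobuf" is a hypothetical softc member; it does not
	 * exist in this driver.
	 */
	WM_EVCNT_ADD(&sc->sc_ev_rx_nobuf, CSR_READ(sc, WMREG_RNBC));
#endif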
   3280 
   3281 	if (sc->sc_flags & WM_F_HAS_MII)
   3282 		mii_tick(&sc->sc_mii);
   3283 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3284 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3285 		wm_serdes_tick(sc);
   3286 	else
   3287 		wm_tbi_tick(sc);
   3288 
   3289 	WM_CORE_UNLOCK(sc);
   3290 
   3291 	wm_watchdog(ifp);
   3292 
   3293 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3294 }
   3295 
   3296 static int
   3297 wm_ifflags_cb(struct ethercom *ec)
   3298 {
   3299 	struct ifnet *ifp = &ec->ec_if;
   3300 	struct wm_softc *sc = ifp->if_softc;
   3301 	int iffchange, ecchange;
   3302 	bool needreset = false;
   3303 	int rc = 0;
   3304 
   3305 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3306 		device_xname(sc->sc_dev), __func__));
   3307 
   3308 	WM_CORE_LOCK(sc);
   3309 
   3310 	/*
   3311 	 * Check for if_flags.
   3312 	 * Main usage is to prevent linkdown when opening bpf.
   3313 	 */
   3314 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3315 	sc->sc_if_flags = ifp->if_flags;
   3316 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3317 		needreset = true;
   3318 		goto ec;
   3319 	}
   3320 
   3321 	/* iff related updates */
   3322 	if ((iffchange & IFF_PROMISC) != 0)
   3323 		wm_set_filter(sc);
   3324 
   3325 	wm_set_vlan(sc);
   3326 
   3327 ec:
   3328 	/* Check for ec_capenable. */
   3329 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3330 	sc->sc_ec_capenable = ec->ec_capenable;
   3331 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3332 		needreset = true;
   3333 		goto out;
   3334 	}
   3335 
   3336 	/* ec related updates */
   3337 	wm_set_eee(sc);
   3338 
   3339 out:
   3340 	if (needreset)
   3341 		rc = ENETRESET;
   3342 	WM_CORE_UNLOCK(sc);
   3343 
   3344 	return rc;
   3345 }
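
/*
 * The XOR in wm_ifflags_cb() yields exactly the set of flags that
 * changed: e.g. old = IFF_UP | IFF_RUNNING and new = IFF_UP | IFF_PROMISC
 * give (old ^ new) == (IFF_RUNNING | IFF_PROMISC), so both the cleared
 * bit and the newly set bit are reported.
 */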
   3346 
   3347 /*
   3348  * wm_ioctl:		[ifnet interface function]
   3349  *
   3350  *	Handle control requests from the operator.
   3351  */
   3352 static int
   3353 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3354 {
   3355 	struct wm_softc *sc = ifp->if_softc;
   3356 	struct ifreq *ifr = (struct ifreq *)data;
   3357 	struct ifaddr *ifa = (struct ifaddr *)data;
   3358 	struct sockaddr_dl *sdl;
   3359 	int s, error;
   3360 
   3361 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3362 		device_xname(sc->sc_dev), __func__));
   3363 
   3364 #ifndef WM_MPSAFE
   3365 	s = splnet();
   3366 #endif
   3367 	switch (cmd) {
   3368 	case SIOCSIFMEDIA:
   3369 		WM_CORE_LOCK(sc);
   3370 		/* Flow control requires full-duplex mode. */
   3371 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3372 		    (ifr->ifr_media & IFM_FDX) == 0)
   3373 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3374 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3375 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3376 				/* We can do both TXPAUSE and RXPAUSE. */
   3377 				ifr->ifr_media |=
   3378 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3379 			}
   3380 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3381 		}
   3382 		WM_CORE_UNLOCK(sc);
   3383 #ifdef WM_MPSAFE
   3384 		s = splnet();
   3385 #endif
   3386 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3387 #ifdef WM_MPSAFE
   3388 		splx(s);
   3389 #endif
   3390 		break;
   3391 	case SIOCINITIFADDR:
   3392 		WM_CORE_LOCK(sc);
   3393 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3394 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3395 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3396 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3397 			/* Unicast address is the first multicast entry */
   3398 			wm_set_filter(sc);
   3399 			error = 0;
   3400 			WM_CORE_UNLOCK(sc);
   3401 			break;
   3402 		}
   3403 		WM_CORE_UNLOCK(sc);
   3404 		/*FALLTHROUGH*/
   3405 	default:
   3406 #ifdef WM_MPSAFE
   3407 		s = splnet();
   3408 #endif
   3409 		/* It may call wm_start, so unlock here */
   3410 		error = ether_ioctl(ifp, cmd, data);
   3411 #ifdef WM_MPSAFE
   3412 		splx(s);
   3413 #endif
   3414 		if (error != ENETRESET)
   3415 			break;
   3416 
   3417 		error = 0;
   3418 
   3419 		if (cmd == SIOCSIFCAP)
   3420 			error = (*ifp->if_init)(ifp);
   3421 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3422 			;
   3423 		else if (ifp->if_flags & IFF_RUNNING) {
   3424 			/*
   3425 			 * Multicast list has changed; set the hardware filter
   3426 			 * accordingly.
   3427 			 */
   3428 			WM_CORE_LOCK(sc);
   3429 			wm_set_filter(sc);
   3430 			WM_CORE_UNLOCK(sc);
   3431 		}
   3432 		break;
   3433 	}
   3434 
   3435 #ifndef WM_MPSAFE
   3436 	splx(s);
   3437 #endif
   3438 	return error;
   3439 }
   3440 
   3441 /* MAC address related */
   3442 
    3443 /*
    3444  * Get the offset of the MAC address and return it.
    3445  * If an error occurs, use offset 0.
    3446  */
   3447 static uint16_t
   3448 wm_check_alt_mac_addr(struct wm_softc *sc)
   3449 {
   3450 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3451 	uint16_t offset = NVM_OFF_MACADDR;
   3452 
   3453 	/* Try to read alternative MAC address pointer */
   3454 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3455 		return 0;
   3456 
    3457 	/* Check whether the pointer is valid. */
   3458 	if ((offset == 0x0000) || (offset == 0xffff))
   3459 		return 0;
   3460 
   3461 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3462 	/*
    3463 	 * Check whether the alternative MAC address is valid. Some cards
    3464 	 * have a non-0xffff pointer but don't actually use an alternative
    3465 	 * MAC address.
    3466 	 *
    3467 	 * A valid unicast address has bit 0 (the I/G bit) of octet 0 clear.
    3468 	 */
   3469 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3470 		if (((myea[0] & 0xff) & 0x01) == 0)
   3471 			return offset; /* Found */
   3472 
   3473 	/* Not found */
   3474 	return 0;
   3475 }
   3476 
   3477 static int
   3478 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3479 {
   3480 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3481 	uint16_t offset = NVM_OFF_MACADDR;
   3482 	int do_invert = 0;
   3483 
   3484 	switch (sc->sc_type) {
   3485 	case WM_T_82580:
   3486 	case WM_T_I350:
   3487 	case WM_T_I354:
   3488 		/* EEPROM Top Level Partitioning */
   3489 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3490 		break;
   3491 	case WM_T_82571:
   3492 	case WM_T_82575:
   3493 	case WM_T_82576:
   3494 	case WM_T_80003:
   3495 	case WM_T_I210:
   3496 	case WM_T_I211:
   3497 		offset = wm_check_alt_mac_addr(sc);
   3498 		if (offset == 0)
   3499 			if ((sc->sc_funcid & 0x01) == 1)
   3500 				do_invert = 1;
   3501 		break;
   3502 	default:
   3503 		if ((sc->sc_funcid & 0x01) == 1)
   3504 			do_invert = 1;
   3505 		break;
   3506 	}
   3507 
   3508 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3509 		goto bad;
   3510 
   3511 	enaddr[0] = myea[0] & 0xff;
   3512 	enaddr[1] = myea[0] >> 8;
   3513 	enaddr[2] = myea[1] & 0xff;
   3514 	enaddr[3] = myea[1] >> 8;
   3515 	enaddr[4] = myea[2] & 0xff;
   3516 	enaddr[5] = myea[2] >> 8;
   3517 
   3518 	/*
   3519 	 * Toggle the LSB of the MAC address on the second port
   3520 	 * of some dual port cards.
   3521 	 */
   3522 	if (do_invert != 0)
   3523 		enaddr[5] ^= 1;
   3524 
   3525 	return 0;
   3526 
   3527  bad:
   3528 	return -1;
   3529 }
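
/*
 * Worked example for wm_read_mac_addr(): the NVM stores the address as
 * three little-endian words, so { 0x1100, 0x3322, 0x5544 } unpacks to
 * 00:11:22:33:44:55; with do_invert set, the last octet becomes 0x54.
 */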
   3530 
   3531 /*
   3532  * wm_set_ral:
   3533  *
    3534  *	Set an entry in the receive address list.
   3535  */
   3536 static void
   3537 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3538 {
   3539 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3540 	uint32_t wlock_mac;
   3541 	int rv;
   3542 
   3543 	if (enaddr != NULL) {
   3544 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3545 		    (enaddr[3] << 24);
   3546 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3547 		ral_hi |= RAL_AV;
   3548 	} else {
   3549 		ral_lo = 0;
   3550 		ral_hi = 0;
   3551 	}
   3552 
   3553 	switch (sc->sc_type) {
   3554 	case WM_T_82542_2_0:
   3555 	case WM_T_82542_2_1:
   3556 	case WM_T_82543:
   3557 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3558 		CSR_WRITE_FLUSH(sc);
   3559 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3560 		CSR_WRITE_FLUSH(sc);
   3561 		break;
   3562 	case WM_T_PCH2:
   3563 	case WM_T_PCH_LPT:
   3564 	case WM_T_PCH_SPT:
   3565 	case WM_T_PCH_CNP:
   3566 		if (idx == 0) {
   3567 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3568 			CSR_WRITE_FLUSH(sc);
   3569 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3570 			CSR_WRITE_FLUSH(sc);
   3571 			return;
   3572 		}
   3573 		if (sc->sc_type != WM_T_PCH2) {
   3574 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3575 			    FWSM_WLOCK_MAC);
   3576 			addrl = WMREG_SHRAL(idx - 1);
   3577 			addrh = WMREG_SHRAH(idx - 1);
   3578 		} else {
   3579 			wlock_mac = 0;
   3580 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3581 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3582 		}
   3583 
   3584 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3585 			rv = wm_get_swflag_ich8lan(sc);
   3586 			if (rv != 0)
   3587 				return;
   3588 			CSR_WRITE(sc, addrl, ral_lo);
   3589 			CSR_WRITE_FLUSH(sc);
   3590 			CSR_WRITE(sc, addrh, ral_hi);
   3591 			CSR_WRITE_FLUSH(sc);
   3592 			wm_put_swflag_ich8lan(sc);
   3593 		}
   3594 
   3595 		break;
   3596 	default:
   3597 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3598 		CSR_WRITE_FLUSH(sc);
   3599 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3600 		CSR_WRITE_FLUSH(sc);
   3601 		break;
   3602 	}
   3603 }
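
/*
 * Example of the RAL/RAH packing above: for 00:11:22:33:44:55,
 * ral_lo = 0x33221100 and ral_hi = RAL_AV | 0x5544, i.e. the first
 * four octets little-endian in RAL and the last two (plus the
 * Address Valid bit) in RAH.
 */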
   3604 
   3605 /*
   3606  * wm_mchash:
   3607  *
   3608  *	Compute the hash of the multicast address for the 4096-bit
   3609  *	multicast filter.
   3610  */
   3611 static uint32_t
   3612 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3613 {
   3614 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3615 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3616 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3617 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3618 	uint32_t hash;
   3619 
   3620 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3621 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3622 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3623 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3624 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3625 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3626 		return (hash & 0x3ff);
   3627 	}
   3628 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3629 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3630 
   3631 	return (hash & 0xfff);
   3632 }
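
/*
 * Worked example: with sc_mchash_type == 0 and the multicast address
 * 01:00:5e:00:00:01, enaddr[4] == 0x00 and enaddr[5] == 0x01, so
 * hash = (0x00 >> 4) | (0x01 << 8) = 0x100.  wm_set_filter() then
 * sets bit (0x100 & 0x1f) = 0 in MTA register (0x100 >> 5) = 8.
 */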
   3633 
    3634 /*
    3635  * wm_rar_count:
    3636  *	Return the number of receive address (RAL/RAH) slots.
    3637  */
   3638 static int
   3639 wm_rar_count(struct wm_softc *sc)
   3640 {
   3641 	int size;
   3642 
   3643 	switch (sc->sc_type) {
   3644 	case WM_T_ICH8:
    3645 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3646 		break;
   3647 	case WM_T_ICH9:
   3648 	case WM_T_ICH10:
   3649 	case WM_T_PCH:
   3650 		size = WM_RAL_TABSIZE_ICH8;
   3651 		break;
   3652 	case WM_T_PCH2:
   3653 		size = WM_RAL_TABSIZE_PCH2;
   3654 		break;
   3655 	case WM_T_PCH_LPT:
   3656 	case WM_T_PCH_SPT:
   3657 	case WM_T_PCH_CNP:
   3658 		size = WM_RAL_TABSIZE_PCH_LPT;
   3659 		break;
   3660 	case WM_T_82575:
   3661 	case WM_T_I210:
   3662 	case WM_T_I211:
   3663 		size = WM_RAL_TABSIZE_82575;
   3664 		break;
   3665 	case WM_T_82576:
   3666 	case WM_T_82580:
   3667 		size = WM_RAL_TABSIZE_82576;
   3668 		break;
   3669 	case WM_T_I350:
   3670 	case WM_T_I354:
   3671 		size = WM_RAL_TABSIZE_I350;
   3672 		break;
   3673 	default:
   3674 		size = WM_RAL_TABSIZE;
   3675 	}
   3676 
   3677 	return size;
   3678 }
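
/*
 * wm_set_filter() below uses this count: slot 0 holds the station
 * address and the remaining slots (those not locked via FWSM on
 * LPT/SPT/CNP) are cleared.
 */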
   3679 
   3680 /*
   3681  * wm_set_filter:
   3682  *
   3683  *	Set up the receive filter.
   3684  */
   3685 static void
   3686 wm_set_filter(struct wm_softc *sc)
   3687 {
   3688 	struct ethercom *ec = &sc->sc_ethercom;
   3689 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3690 	struct ether_multi *enm;
   3691 	struct ether_multistep step;
   3692 	bus_addr_t mta_reg;
   3693 	uint32_t hash, reg, bit;
   3694 	int i, size, ralmax;
   3695 
   3696 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3697 		device_xname(sc->sc_dev), __func__));
   3698 
   3699 	if (sc->sc_type >= WM_T_82544)
   3700 		mta_reg = WMREG_CORDOVA_MTA;
   3701 	else
   3702 		mta_reg = WMREG_MTA;
   3703 
   3704 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3705 
   3706 	if (ifp->if_flags & IFF_BROADCAST)
   3707 		sc->sc_rctl |= RCTL_BAM;
   3708 	if (ifp->if_flags & IFF_PROMISC) {
   3709 		sc->sc_rctl |= RCTL_UPE;
   3710 		ETHER_LOCK(ec);
   3711 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3712 		ETHER_UNLOCK(ec);
   3713 		goto allmulti;
   3714 	}
   3715 
   3716 	/*
   3717 	 * Set the station address in the first RAL slot, and
   3718 	 * clear the remaining slots.
   3719 	 */
   3720 	size = wm_rar_count(sc);
   3721 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3722 
   3723 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3724 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3725 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3726 		switch (i) {
   3727 		case 0:
   3728 			/* We can use all entries */
   3729 			ralmax = size;
   3730 			break;
   3731 		case 1:
   3732 			/* Only RAR[0] */
   3733 			ralmax = 1;
   3734 			break;
   3735 		default:
   3736 			/* Available SHRA + RAR[0] */
   3737 			ralmax = i + 1;
   3738 		}
   3739 	} else
   3740 		ralmax = size;
   3741 	for (i = 1; i < size; i++) {
   3742 		if (i < ralmax)
   3743 			wm_set_ral(sc, NULL, i);
   3744 	}
   3745 
   3746 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3747 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3748 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3749 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3750 		size = WM_ICH8_MC_TABSIZE;
   3751 	else
   3752 		size = WM_MC_TABSIZE;
   3753 	/* Clear out the multicast table. */
   3754 	for (i = 0; i < size; i++) {
   3755 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3756 		CSR_WRITE_FLUSH(sc);
   3757 	}
   3758 
   3759 	ETHER_LOCK(ec);
   3760 	ETHER_FIRST_MULTI(step, ec, enm);
   3761 	while (enm != NULL) {
   3762 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3763 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3764 			ETHER_UNLOCK(ec);
   3765 			/*
   3766 			 * We must listen to a range of multicast addresses.
   3767 			 * For now, just accept all multicasts, rather than
   3768 			 * trying to set only those filter bits needed to match
   3769 			 * the range.  (At this time, the only use of address
   3770 			 * ranges is for IP multicast routing, for which the
   3771 			 * range is big enough to require all bits set.)
   3772 			 */
   3773 			goto allmulti;
   3774 		}
   3775 
   3776 		hash = wm_mchash(sc, enm->enm_addrlo);
   3777 
   3778 		reg = (hash >> 5);
   3779 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3780 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3781 		    || (sc->sc_type == WM_T_PCH2)
   3782 		    || (sc->sc_type == WM_T_PCH_LPT)
   3783 		    || (sc->sc_type == WM_T_PCH_SPT)
   3784 		    || (sc->sc_type == WM_T_PCH_CNP))
   3785 			reg &= 0x1f;
   3786 		else
   3787 			reg &= 0x7f;
   3788 		bit = hash & 0x1f;
   3789 
   3790 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3791 		hash |= 1U << bit;
   3792 
   3793 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3794 			/*
    3795 			 * 82544 Errata 9: Certain registers cannot be written
   3796 			 * with particular alignments in PCI-X bus operation
   3797 			 * (FCAH, MTA and VFTA).
   3798 			 */
   3799 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3800 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3801 			CSR_WRITE_FLUSH(sc);
   3802 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3803 			CSR_WRITE_FLUSH(sc);
   3804 		} else {
   3805 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3806 			CSR_WRITE_FLUSH(sc);
   3807 		}
   3808 
   3809 		ETHER_NEXT_MULTI(step, enm);
   3810 	}
   3811 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3812 	ETHER_UNLOCK(ec);
   3813 
   3814 	goto setit;
   3815 
   3816  allmulti:
   3817 	sc->sc_rctl |= RCTL_MPE;
   3818 
   3819  setit:
   3820 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3821 }
   3822 
   3823 /* Reset and init related */
   3824 
   3825 static void
   3826 wm_set_vlan(struct wm_softc *sc)
   3827 {
   3828 
   3829 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3830 		device_xname(sc->sc_dev), __func__));
   3831 
   3832 	/* Deal with VLAN enables. */
   3833 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3834 		sc->sc_ctrl |= CTRL_VME;
   3835 	else
   3836 		sc->sc_ctrl &= ~CTRL_VME;
   3837 
   3838 	/* Write the control registers. */
   3839 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3840 }
   3841 
   3842 static void
   3843 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3844 {
   3845 	uint32_t gcr;
   3846 	pcireg_t ctrl2;
   3847 
   3848 	gcr = CSR_READ(sc, WMREG_GCR);
   3849 
   3850 	/* Only take action if timeout value is defaulted to 0 */
   3851 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3852 		goto out;
   3853 
   3854 	if ((gcr & GCR_CAP_VER2) == 0) {
   3855 		gcr |= GCR_CMPL_TMOUT_10MS;
   3856 		goto out;
   3857 	}
   3858 
   3859 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3860 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3861 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3862 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3863 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3864 
   3865 out:
   3866 	/* Disable completion timeout resend */
   3867 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3868 
   3869 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3870 }
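
/*
 * Summary of the above: if a completion timeout is already configured
 * in GCR, it is left alone; otherwise 10ms is programmed via GCR on
 * pre-v2-capability devices, or 16ms via the PCIe DCSR2 register.  In
 * all cases completion timeout resend is disabled.
 */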
   3871 
   3872 void
   3873 wm_get_auto_rd_done(struct wm_softc *sc)
   3874 {
   3875 	int i;
   3876 
   3877 	/* wait for eeprom to reload */
   3878 	switch (sc->sc_type) {
   3879 	case WM_T_82571:
   3880 	case WM_T_82572:
   3881 	case WM_T_82573:
   3882 	case WM_T_82574:
   3883 	case WM_T_82583:
   3884 	case WM_T_82575:
   3885 	case WM_T_82576:
   3886 	case WM_T_82580:
   3887 	case WM_T_I350:
   3888 	case WM_T_I354:
   3889 	case WM_T_I210:
   3890 	case WM_T_I211:
   3891 	case WM_T_80003:
   3892 	case WM_T_ICH8:
   3893 	case WM_T_ICH9:
   3894 		for (i = 0; i < 10; i++) {
   3895 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3896 				break;
   3897 			delay(1000);
   3898 		}
   3899 		if (i == 10) {
   3900 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3901 			    "complete\n", device_xname(sc->sc_dev));
   3902 		}
   3903 		break;
   3904 	default:
   3905 		break;
   3906 	}
   3907 }
   3908 
   3909 void
   3910 wm_lan_init_done(struct wm_softc *sc)
   3911 {
   3912 	uint32_t reg = 0;
   3913 	int i;
   3914 
   3915 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3916 		device_xname(sc->sc_dev), __func__));
   3917 
   3918 	/* Wait for eeprom to reload */
   3919 	switch (sc->sc_type) {
   3920 	case WM_T_ICH10:
   3921 	case WM_T_PCH:
   3922 	case WM_T_PCH2:
   3923 	case WM_T_PCH_LPT:
   3924 	case WM_T_PCH_SPT:
   3925 	case WM_T_PCH_CNP:
   3926 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3927 			reg = CSR_READ(sc, WMREG_STATUS);
   3928 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3929 				break;
   3930 			delay(100);
   3931 		}
   3932 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3933 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3934 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3935 		}
   3936 		break;
   3937 	default:
   3938 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3939 		    __func__);
   3940 		break;
   3941 	}
   3942 
   3943 	reg &= ~STATUS_LAN_INIT_DONE;
   3944 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3945 }
   3946 
   3947 void
   3948 wm_get_cfg_done(struct wm_softc *sc)
   3949 {
   3950 	int mask;
   3951 	uint32_t reg;
   3952 	int i;
   3953 
   3954 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3955 		device_xname(sc->sc_dev), __func__));
   3956 
   3957 	/* Wait for eeprom to reload */
   3958 	switch (sc->sc_type) {
   3959 	case WM_T_82542_2_0:
   3960 	case WM_T_82542_2_1:
   3961 		/* null */
   3962 		break;
   3963 	case WM_T_82543:
   3964 	case WM_T_82544:
   3965 	case WM_T_82540:
   3966 	case WM_T_82545:
   3967 	case WM_T_82545_3:
   3968 	case WM_T_82546:
   3969 	case WM_T_82546_3:
   3970 	case WM_T_82541:
   3971 	case WM_T_82541_2:
   3972 	case WM_T_82547:
   3973 	case WM_T_82547_2:
   3974 	case WM_T_82573:
   3975 	case WM_T_82574:
   3976 	case WM_T_82583:
   3977 		/* generic */
   3978 		delay(10*1000);
   3979 		break;
   3980 	case WM_T_80003:
   3981 	case WM_T_82571:
   3982 	case WM_T_82572:
   3983 	case WM_T_82575:
   3984 	case WM_T_82576:
   3985 	case WM_T_82580:
   3986 	case WM_T_I350:
   3987 	case WM_T_I354:
   3988 	case WM_T_I210:
   3989 	case WM_T_I211:
   3990 		if (sc->sc_type == WM_T_82571) {
   3991 			/* Only 82571 shares port 0 */
   3992 			mask = EEMNGCTL_CFGDONE_0;
   3993 		} else
   3994 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3995 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3996 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3997 				break;
   3998 			delay(1000);
   3999 		}
   4000 		if (i >= WM_PHY_CFG_TIMEOUT)
   4001 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4002 				device_xname(sc->sc_dev), __func__));
   4003 		break;
   4004 	case WM_T_ICH8:
   4005 	case WM_T_ICH9:
   4006 	case WM_T_ICH10:
   4007 	case WM_T_PCH:
   4008 	case WM_T_PCH2:
   4009 	case WM_T_PCH_LPT:
   4010 	case WM_T_PCH_SPT:
   4011 	case WM_T_PCH_CNP:
   4012 		delay(10*1000);
   4013 		if (sc->sc_type >= WM_T_ICH10)
   4014 			wm_lan_init_done(sc);
   4015 		else
   4016 			wm_get_auto_rd_done(sc);
   4017 
   4018 		/* Clear PHY Reset Asserted bit */
   4019 		reg = CSR_READ(sc, WMREG_STATUS);
   4020 		if ((reg & STATUS_PHYRA) != 0)
   4021 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4022 		break;
   4023 	default:
   4024 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4025 		    __func__);
   4026 		break;
   4027 	}
   4028 }
   4029 
   4030 int
   4031 wm_phy_post_reset(struct wm_softc *sc)
   4032 {
   4033 	device_t dev = sc->sc_dev;
   4034 	uint16_t reg;
   4035 	int rv = 0;
   4036 
   4037 	/* This function is only for ICH8 and newer. */
   4038 	if (sc->sc_type < WM_T_ICH8)
   4039 		return 0;
   4040 
   4041 	if (wm_phy_resetisblocked(sc)) {
   4042 		/* XXX */
   4043 		device_printf(dev, "PHY is blocked\n");
   4044 		return -1;
   4045 	}
   4046 
   4047 	/* Allow time for h/w to get to quiescent state after reset */
   4048 	delay(10*1000);
   4049 
   4050 	/* Perform any necessary post-reset workarounds */
   4051 	if (sc->sc_type == WM_T_PCH)
   4052 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4053 	else if (sc->sc_type == WM_T_PCH2)
   4054 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4055 	if (rv != 0)
   4056 		return rv;
   4057 
   4058 	/* Clear the host wakeup bit after lcd reset */
   4059 	if (sc->sc_type >= WM_T_PCH) {
   4060 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4061 		reg &= ~BM_WUC_HOST_WU_BIT;
   4062 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4063 	}
   4064 
   4065 	/* Configure the LCD with the extended configuration region in NVM */
   4066 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4067 		return rv;
   4068 
   4069 	/* Configure the LCD with the OEM bits in NVM */
   4070 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4071 
   4072 	if (sc->sc_type == WM_T_PCH2) {
   4073 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4074 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4075 			delay(10 * 1000);
   4076 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4077 		}
   4078 		/* Set EEE LPI Update Timer to 200usec */
   4079 		rv = sc->phy.acquire(sc);
   4080 		if (rv)
   4081 			return rv;
   4082 		rv = wm_write_emi_reg_locked(dev,
   4083 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4084 		sc->phy.release(sc);
   4085 	}
   4086 
   4087 	return rv;
   4088 }
   4089 
   4090 /* Only for PCH and newer */
   4091 static int
   4092 wm_write_smbus_addr(struct wm_softc *sc)
   4093 {
   4094 	uint32_t strap, freq;
   4095 	uint16_t phy_data;
   4096 	int rv;
   4097 
   4098 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4099 		device_xname(sc->sc_dev), __func__));
   4100 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4101 
   4102 	strap = CSR_READ(sc, WMREG_STRAP);
   4103 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4104 
   4105 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4106 	if (rv != 0)
   4107 		return -1;
   4108 
   4109 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4110 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4111 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4112 
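	/*
	 * Note: "freq--" below both tests and rebases the strap value;
	 * strap encoding 1..3 maps to bit pattern 0..2, and a strap
	 * value of 0 means the SMBus frequency is unsupported.
	 */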
   4113 	if (sc->sc_phytype == WMPHY_I217) {
   4114 		/* Restore SMBus frequency */
    4115 		if (freq--) {
   4116 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4117 			    | HV_SMB_ADDR_FREQ_HIGH);
   4118 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4119 			    HV_SMB_ADDR_FREQ_LOW);
   4120 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4121 			    HV_SMB_ADDR_FREQ_HIGH);
   4122 		} else
   4123 			DPRINTF(WM_DEBUG_INIT,
   4124 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4125 				device_xname(sc->sc_dev), __func__));
   4126 	}
   4127 
   4128 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4129 	    phy_data);
   4130 }
   4131 
   4132 static int
   4133 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4134 {
   4135 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4136 	uint16_t phy_page = 0;
   4137 	int rv = 0;
   4138 
   4139 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4140 		device_xname(sc->sc_dev), __func__));
   4141 
   4142 	switch (sc->sc_type) {
   4143 	case WM_T_ICH8:
   4144 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4145 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4146 			return 0;
   4147 
   4148 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4149 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4150 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4151 			break;
   4152 		}
   4153 		/* FALLTHROUGH */
   4154 	case WM_T_PCH:
   4155 	case WM_T_PCH2:
   4156 	case WM_T_PCH_LPT:
   4157 	case WM_T_PCH_SPT:
   4158 	case WM_T_PCH_CNP:
   4159 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4160 		break;
   4161 	default:
   4162 		return 0;
   4163 	}
   4164 
   4165 	if ((rv = sc->phy.acquire(sc)) != 0)
   4166 		return rv;
   4167 
   4168 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4169 	if ((reg & sw_cfg_mask) == 0)
   4170 		goto release;
   4171 
   4172 	/*
   4173 	 * Make sure HW does not configure LCD from PHY extended configuration
   4174 	 * before SW configuration
   4175 	 */
   4176 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4177 	if ((sc->sc_type < WM_T_PCH2)
   4178 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4179 		goto release;
   4180 
   4181 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4182 		device_xname(sc->sc_dev), __func__));
   4183 	/* word_addr is in DWORD */
   4184 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4185 
   4186 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4187 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4188 	if (cnf_size == 0)
   4189 		goto release;
   4190 
   4191 	if (((sc->sc_type == WM_T_PCH)
   4192 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4193 	    || (sc->sc_type > WM_T_PCH)) {
   4194 		/*
   4195 		 * HW configures the SMBus address and LEDs when the OEM and
   4196 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4197 		 * are cleared, SW will configure them instead.
   4198 		 */
   4199 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4200 			device_xname(sc->sc_dev), __func__));
   4201 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4202 			goto release;
   4203 
   4204 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4205 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4206 		    (uint16_t)reg);
   4207 		if (rv != 0)
   4208 			goto release;
   4209 	}
   4210 
   4211 	/* Configure LCD from extended configuration region. */
   4212 	for (i = 0; i < cnf_size; i++) {
   4213 		uint16_t reg_data, reg_addr;
   4214 
   4215 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4216 			goto release;
   4217 
   4218 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4219 			goto release;
   4220 
   4221 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4222 			phy_page = reg_data;
   4223 
   4224 		reg_addr &= IGPHY_MAXREGADDR;
   4225 		reg_addr |= phy_page;
   4226 
   4227 		KASSERT(sc->phy.writereg_locked != NULL);
   4228 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4229 		    reg_data);
   4230 	}
   4231 
   4232 release:
   4233 	sc->phy.release(sc);
   4234 	return rv;
   4235 }
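
/*
 * Layout note for wm_init_lcd_from_nvm(): the extended configuration
 * region consists of cnf_size pairs of NVM words { reg_data, reg_addr }.
 * A MII_IGPHY_PAGE_SELECT entry updates phy_page, which is then OR'd
 * into the register address of each following entry.
 */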
   4236 
   4237 /*
   4238  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4239  *  @sc:       pointer to the HW structure
   4240  *  @d0_state: boolean if entering d0 or d3 device state
   4241  *
   4242  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4243  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4244  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4245  */
   4246 int
   4247 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4248 {
   4249 	uint32_t mac_reg;
   4250 	uint16_t oem_reg;
   4251 	int rv;
   4252 
   4253 	if (sc->sc_type < WM_T_PCH)
   4254 		return 0;
   4255 
   4256 	rv = sc->phy.acquire(sc);
   4257 	if (rv != 0)
   4258 		return rv;
   4259 
   4260 	if (sc->sc_type == WM_T_PCH) {
   4261 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4262 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4263 			goto release;
   4264 	}
   4265 
   4266 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4267 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4268 		goto release;
   4269 
   4270 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4271 
   4272 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4273 	if (rv != 0)
   4274 		goto release;
   4275 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4276 
   4277 	if (d0_state) {
   4278 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4279 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4280 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4281 			oem_reg |= HV_OEM_BITS_LPLU;
   4282 	} else {
   4283 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4284 		    != 0)
   4285 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4286 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4287 		    != 0)
   4288 			oem_reg |= HV_OEM_BITS_LPLU;
   4289 	}
   4290 
   4291 	/* Set Restart auto-neg to activate the bits */
   4292 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4293 	    && (wm_phy_resetisblocked(sc) == false))
   4294 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4295 
   4296 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4297 
   4298 release:
   4299 	sc->phy.release(sc);
   4300 
   4301 	return rv;
   4302 }
   4303 
   4304 /* Init hardware bits */
   4305 void
   4306 wm_initialize_hardware_bits(struct wm_softc *sc)
   4307 {
   4308 	uint32_t tarc0, tarc1, reg;
   4309 
   4310 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4311 		device_xname(sc->sc_dev), __func__));
   4312 
   4313 	/* For 82571 variant, 80003 and ICHs */
   4314 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4315 	    || (sc->sc_type >= WM_T_80003)) {
   4316 
   4317 		/* Transmit Descriptor Control 0 */
   4318 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4319 		reg |= TXDCTL_COUNT_DESC;
   4320 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4321 
   4322 		/* Transmit Descriptor Control 1 */
   4323 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4324 		reg |= TXDCTL_COUNT_DESC;
   4325 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4326 
   4327 		/* TARC0 */
   4328 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4329 		switch (sc->sc_type) {
   4330 		case WM_T_82571:
   4331 		case WM_T_82572:
   4332 		case WM_T_82573:
   4333 		case WM_T_82574:
   4334 		case WM_T_82583:
   4335 		case WM_T_80003:
   4336 			/* Clear bits 30..27 */
   4337 			tarc0 &= ~__BITS(30, 27);
   4338 			break;
   4339 		default:
   4340 			break;
   4341 		}
   4342 
   4343 		switch (sc->sc_type) {
   4344 		case WM_T_82571:
   4345 		case WM_T_82572:
   4346 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4347 
   4348 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4349 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4350 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4351 			/* 8257[12] Errata No.7 */
    4352 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4353 
   4354 			/* TARC1 bit 28 */
   4355 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4356 				tarc1 &= ~__BIT(28);
   4357 			else
   4358 				tarc1 |= __BIT(28);
   4359 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4360 
   4361 			/*
   4362 			 * 8257[12] Errata No.13
    4363 			 * Disable Dynamic Clock Gating.
   4364 			 */
   4365 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4366 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4367 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4368 			break;
   4369 		case WM_T_82573:
   4370 		case WM_T_82574:
   4371 		case WM_T_82583:
   4372 			if ((sc->sc_type == WM_T_82574)
   4373 			    || (sc->sc_type == WM_T_82583))
   4374 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4375 
   4376 			/* Extended Device Control */
   4377 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4378 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4379 			reg |= __BIT(22);	/* Set bit 22 */
   4380 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4381 
   4382 			/* Device Control */
   4383 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4384 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4385 
   4386 			/* PCIe Control Register */
   4387 			/*
   4388 			 * 82573 Errata (unknown).
   4389 			 *
   4390 			 * 82574 Errata 25 and 82583 Errata 12
   4391 			 * "Dropped Rx Packets":
    4392 			 *   NVM image version 2.1.4 and newer do not have this bug.
   4393 			 */
   4394 			reg = CSR_READ(sc, WMREG_GCR);
   4395 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4396 			CSR_WRITE(sc, WMREG_GCR, reg);
   4397 
   4398 			if ((sc->sc_type == WM_T_82574)
   4399 			    || (sc->sc_type == WM_T_82583)) {
   4400 				/*
   4401 				 * Document says this bit must be set for
   4402 				 * proper operation.
   4403 				 */
   4404 				reg = CSR_READ(sc, WMREG_GCR);
   4405 				reg |= __BIT(22);
   4406 				CSR_WRITE(sc, WMREG_GCR, reg);
   4407 
    4408 				/*
    4409 				 * Apply a workaround for the hardware erratum
    4410 				 * documented in the errata docs. It fixes an
    4411 				 * issue where error-prone or unreliable PCIe
    4412 				 * completions occur, particularly with ASPM
    4413 				 * enabled. Without the fix, the issue can
    4414 				 * cause Tx timeouts.
    4415 				 */
   4416 				reg = CSR_READ(sc, WMREG_GCR2);
   4417 				reg |= __BIT(0);
   4418 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4419 			}
   4420 			break;
   4421 		case WM_T_80003:
   4422 			/* TARC0 */
   4423 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4424 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4425 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4426 
   4427 			/* TARC1 bit 28 */
   4428 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4429 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4430 				tarc1 &= ~__BIT(28);
   4431 			else
   4432 				tarc1 |= __BIT(28);
   4433 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4434 			break;
   4435 		case WM_T_ICH8:
   4436 		case WM_T_ICH9:
   4437 		case WM_T_ICH10:
   4438 		case WM_T_PCH:
   4439 		case WM_T_PCH2:
   4440 		case WM_T_PCH_LPT:
   4441 		case WM_T_PCH_SPT:
   4442 		case WM_T_PCH_CNP:
   4443 			/* TARC0 */
   4444 			if (sc->sc_type == WM_T_ICH8) {
   4445 				/* Set TARC0 bits 29 and 28 */
   4446 				tarc0 |= __BITS(29, 28);
   4447 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4448 				tarc0 |= __BIT(29);
   4449 				/*
   4450 				 *  Drop bit 28. From Linux.
   4451 				 * See I218/I219 spec update
   4452 				 * "5. Buffer Overrun While the I219 is
   4453 				 * Processing DMA Transactions"
   4454 				 */
   4455 				tarc0 &= ~__BIT(28);
   4456 			}
   4457 			/* Set TARC0 bits 23,24,26,27 */
   4458 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4459 
   4460 			/* CTRL_EXT */
   4461 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4462 			reg |= __BIT(22);	/* Set bit 22 */
   4463 			/*
   4464 			 * Enable PHY low-power state when MAC is at D3
   4465 			 * w/o WoL
   4466 			 */
   4467 			if (sc->sc_type >= WM_T_PCH)
   4468 				reg |= CTRL_EXT_PHYPDEN;
   4469 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4470 
   4471 			/* TARC1 */
   4472 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4473 			/* bit 28 */
   4474 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4475 				tarc1 &= ~__BIT(28);
   4476 			else
   4477 				tarc1 |= __BIT(28);
   4478 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4479 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4480 
   4481 			/* Device Status */
   4482 			if (sc->sc_type == WM_T_ICH8) {
   4483 				reg = CSR_READ(sc, WMREG_STATUS);
   4484 				reg &= ~__BIT(31);
   4485 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4486 
   4487 			}
   4488 
   4489 			/* IOSFPC */
   4490 			if (sc->sc_type == WM_T_PCH_SPT) {
   4491 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4492 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4493 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4494 			}
   4495 			/*
    4496 			 * To work around a descriptor data corruption issue
    4497 			 * seen with NFSv2 UDP traffic, simply disable the NFS
    4498 			 * filtering capability.
   4499 			 */
   4500 			reg = CSR_READ(sc, WMREG_RFCTL);
   4501 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4502 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4503 			break;
   4504 		default:
   4505 			break;
   4506 		}
   4507 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4508 
   4509 		switch (sc->sc_type) {
   4510 		/*
   4511 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4512 		 * Avoid RSS Hash Value bug.
   4513 		 */
   4514 		case WM_T_82571:
   4515 		case WM_T_82572:
   4516 		case WM_T_82573:
   4517 		case WM_T_80003:
   4518 		case WM_T_ICH8:
   4519 			reg = CSR_READ(sc, WMREG_RFCTL);
   4520 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4521 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4522 			break;
   4523 		case WM_T_82574:
    4524 			/* Use extended Rx descriptors. */
   4525 			reg = CSR_READ(sc, WMREG_RFCTL);
   4526 			reg |= WMREG_RFCTL_EXSTEN;
   4527 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4528 			break;
   4529 		default:
   4530 			break;
   4531 		}
   4532 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4533 		/*
   4534 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4535 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4536 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4537 		 * Correctly by the Device"
   4538 		 *
   4539 		 * I354(C2000) Errata AVR53:
   4540 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4541 		 * Hang"
   4542 		 */
   4543 		reg = CSR_READ(sc, WMREG_RFCTL);
   4544 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4545 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4546 	}
   4547 }
   4548 
   4549 static uint32_t
   4550 wm_rxpbs_adjust_82580(uint32_t val)
   4551 {
   4552 	uint32_t rv = 0;
   4553 
   4554 	if (val < __arraycount(wm_82580_rxpbs_table))
   4555 		rv = wm_82580_rxpbs_table[val];
   4556 
   4557 	return rv;
   4558 }
   4559 
   4560 /*
   4561  * wm_reset_phy:
   4562  *
   4563  *	generic PHY reset function.
   4564  *	Same as e1000_phy_hw_reset_generic()
   4565  */
   4566 static int
   4567 wm_reset_phy(struct wm_softc *sc)
   4568 {
   4569 	uint32_t reg;
   4570 
   4571 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4572 		device_xname(sc->sc_dev), __func__));
   4573 	if (wm_phy_resetisblocked(sc))
   4574 		return -1;
   4575 
   4576 	sc->phy.acquire(sc);
   4577 
   4578 	reg = CSR_READ(sc, WMREG_CTRL);
   4579 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4580 	CSR_WRITE_FLUSH(sc);
   4581 
   4582 	delay(sc->phy.reset_delay_us);
   4583 
   4584 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4585 	CSR_WRITE_FLUSH(sc);
   4586 
   4587 	delay(150);
   4588 
   4589 	sc->phy.release(sc);
   4590 
   4591 	wm_get_cfg_done(sc);
   4592 	wm_phy_post_reset(sc);
   4593 
   4594 	return 0;
   4595 }
   4596 
   4597 /*
    4598  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
    4599  * so it is sufficient to check sc->sc_queue[0].
   4600  */
   4601 static void
   4602 wm_flush_desc_rings(struct wm_softc *sc)
   4603 {
   4604 	pcireg_t preg;
   4605 	uint32_t reg;
   4606 	struct wm_txqueue *txq;
   4607 	wiseman_txdesc_t *txd;
   4608 	int nexttx;
   4609 	uint32_t rctl;
   4610 
   4611 	/* First, disable MULR fix in FEXTNVM11 */
   4612 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4613 	reg |= FEXTNVM11_DIS_MULRFIX;
   4614 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4615 
   4616 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4617 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4618 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4619 		return;
   4620 
   4621 	/* TX */
   4622 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4623 	    device_xname(sc->sc_dev), preg, reg);
   4624 	reg = CSR_READ(sc, WMREG_TCTL);
   4625 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4626 
   4627 	txq = &sc->sc_queue[0].wmq_txq;
   4628 	nexttx = txq->txq_next;
   4629 	txd = &txq->txq_descs[nexttx];
   4630 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4631 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4632 	txd->wtx_fields.wtxu_status = 0;
   4633 	txd->wtx_fields.wtxu_options = 0;
   4634 	txd->wtx_fields.wtxu_vlan = 0;
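         	/*
         	 * The single 512-byte dummy descriptor built above (IFCS set)
         	 * only gives the hardware something to fetch so the requested
         	 * ring flush can complete; the payload content is irrelevant.
         	 */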
   4635 
   4636 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4637 	    BUS_SPACE_BARRIER_WRITE);
   4638 
   4639 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4640 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4641 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4642 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4643 	delay(250);
   4644 
   4645 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4646 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4647 		return;
   4648 
   4649 	/* RX */
   4650 	printf("%s: Need RX flush (reg = %08x)\n",
   4651 	    device_xname(sc->sc_dev), preg);
   4652 	rctl = CSR_READ(sc, WMREG_RCTL);
   4653 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4654 	CSR_WRITE_FLUSH(sc);
   4655 	delay(150);
   4656 
   4657 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4658 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4659 	reg &= 0xffffc000;
   4660 	/*
   4661 	 * Update thresholds: prefetch threshold to 31, host threshold
   4662 	 * to 1 and make sure the granularity is "descriptors" and not
   4663 	 * "cache lines"
   4664 	 */
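         	/* That is: PTHRESH = 31 (0x1f) and HTHRESH = 1 (1 << 8) */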
   4665 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4666 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4667 
   4668 	/* Momentarily enable the RX ring for the changes to take effect */
   4669 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4670 	CSR_WRITE_FLUSH(sc);
   4671 	delay(150);
   4672 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4673 }
   4674 
   4675 /*
   4676  * wm_reset:
   4677  *
   4678  *	Reset the i82542 chip.
   4679  */
   4680 static void
   4681 wm_reset(struct wm_softc *sc)
   4682 {
   4683 	int phy_reset = 0;
   4684 	int i, error = 0;
   4685 	uint32_t reg;
   4686 	uint16_t kmreg;
   4687 	int rv;
   4688 
   4689 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4690 		device_xname(sc->sc_dev), __func__));
   4691 	KASSERT(sc->sc_type != 0);
   4692 
   4693 	/*
   4694 	 * Allocate on-chip memory according to the MTU size.
   4695 	 * The Packet Buffer Allocation register must be written
   4696 	 * before the chip is reset.
   4697 	 */
   4698 	switch (sc->sc_type) {
   4699 	case WM_T_82547:
   4700 	case WM_T_82547_2:
   4701 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4702 		    PBA_22K : PBA_30K;
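         		/*
         		 * The 40KB packet buffer is split: Rx gets sc_pba, and
         		 * the Tx FIFO set up below gets the rest (40K - sc_pba).
         		 */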
   4703 		for (i = 0; i < sc->sc_nqueues; i++) {
   4704 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4705 			txq->txq_fifo_head = 0;
   4706 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4707 			txq->txq_fifo_size =
   4708 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4709 			txq->txq_fifo_stall = 0;
   4710 		}
   4711 		break;
   4712 	case WM_T_82571:
   4713 	case WM_T_82572:
    4714 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4715 	case WM_T_80003:
   4716 		sc->sc_pba = PBA_32K;
   4717 		break;
   4718 	case WM_T_82573:
   4719 		sc->sc_pba = PBA_12K;
   4720 		break;
   4721 	case WM_T_82574:
   4722 	case WM_T_82583:
   4723 		sc->sc_pba = PBA_20K;
   4724 		break;
   4725 	case WM_T_82576:
   4726 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4727 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4728 		break;
   4729 	case WM_T_82580:
   4730 	case WM_T_I350:
   4731 	case WM_T_I354:
   4732 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4733 		break;
   4734 	case WM_T_I210:
   4735 	case WM_T_I211:
   4736 		sc->sc_pba = PBA_34K;
   4737 		break;
   4738 	case WM_T_ICH8:
   4739 		/* Workaround for a bit corruption issue in FIFO memory */
   4740 		sc->sc_pba = PBA_8K;
   4741 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4742 		break;
   4743 	case WM_T_ICH9:
   4744 	case WM_T_ICH10:
   4745 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4746 		    PBA_14K : PBA_10K;
   4747 		break;
   4748 	case WM_T_PCH:
   4749 	case WM_T_PCH2:	/* XXX 14K? */
   4750 	case WM_T_PCH_LPT:
   4751 	case WM_T_PCH_SPT:
   4752 	case WM_T_PCH_CNP:
   4753 		sc->sc_pba = PBA_26K;
   4754 		break;
   4755 	default:
   4756 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4757 		    PBA_40K : PBA_48K;
   4758 		break;
   4759 	}
   4760 	/*
    4761 	 * Only old or non-multiqueue devices have the PBA register.
   4762 	 * XXX Need special handling for 82575.
   4763 	 */
   4764 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4765 	    || (sc->sc_type == WM_T_82575))
   4766 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4767 
   4768 	/* Prevent the PCI-E bus from sticking */
   4769 	if (sc->sc_flags & WM_F_PCIE) {
   4770 		int timeout = 800;
   4771 
   4772 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4773 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4774 
   4775 		while (timeout--) {
   4776 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4777 			    == 0)
   4778 				break;
   4779 			delay(100);
   4780 		}
    4781 		if (timeout < 0)
   4782 			device_printf(sc->sc_dev,
   4783 			    "failed to disable busmastering\n");
   4784 	}
   4785 
    4786 	/* Set the completion timeout for the interface */
   4787 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4788 	    || (sc->sc_type == WM_T_82580)
   4789 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4790 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4791 		wm_set_pcie_completion_timeout(sc);
   4792 
   4793 	/* Clear interrupt */
   4794 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4795 	if (wm_is_using_msix(sc)) {
   4796 		if (sc->sc_type != WM_T_82574) {
   4797 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4798 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4799 		} else
   4800 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4801 	}
   4802 
   4803 	/* Stop the transmit and receive processes. */
   4804 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4805 	sc->sc_rctl &= ~RCTL_EN;
   4806 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4807 	CSR_WRITE_FLUSH(sc);
   4808 
   4809 	/* XXX set_tbi_sbp_82543() */
   4810 
   4811 	delay(10*1000);
   4812 
   4813 	/* Must acquire the MDIO ownership before MAC reset */
   4814 	switch (sc->sc_type) {
   4815 	case WM_T_82573:
   4816 	case WM_T_82574:
   4817 	case WM_T_82583:
   4818 		error = wm_get_hw_semaphore_82573(sc);
   4819 		break;
   4820 	default:
   4821 		break;
   4822 	}
   4823 
   4824 	/*
   4825 	 * 82541 Errata 29? & 82547 Errata 28?
   4826 	 * See also the description about PHY_RST bit in CTRL register
   4827 	 * in 8254x_GBe_SDM.pdf.
   4828 	 */
   4829 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4830 		CSR_WRITE(sc, WMREG_CTRL,
   4831 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4832 		CSR_WRITE_FLUSH(sc);
   4833 		delay(5000);
   4834 	}
   4835 
   4836 	switch (sc->sc_type) {
   4837 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4838 	case WM_T_82541:
   4839 	case WM_T_82541_2:
   4840 	case WM_T_82547:
   4841 	case WM_T_82547_2:
   4842 		/*
   4843 		 * On some chipsets, a reset through a memory-mapped write
   4844 		 * cycle can cause the chip to reset before completing the
    4845 		 * write cycle. This causes a major headache that can be avoided
   4846 		 * by issuing the reset via indirect register writes through
   4847 		 * I/O space.
   4848 		 *
   4849 		 * So, if we successfully mapped the I/O BAR at attach time,
   4850 		 * use that. Otherwise, try our luck with a memory-mapped
   4851 		 * reset.
   4852 		 */
   4853 		if (sc->sc_flags & WM_F_IOH_VALID)
   4854 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4855 		else
   4856 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4857 		break;
   4858 	case WM_T_82545_3:
   4859 	case WM_T_82546_3:
   4860 		/* Use the shadow control register on these chips. */
   4861 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4862 		break;
   4863 	case WM_T_80003:
   4864 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4865 		sc->phy.acquire(sc);
   4866 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4867 		sc->phy.release(sc);
   4868 		break;
   4869 	case WM_T_ICH8:
   4870 	case WM_T_ICH9:
   4871 	case WM_T_ICH10:
   4872 	case WM_T_PCH:
   4873 	case WM_T_PCH2:
   4874 	case WM_T_PCH_LPT:
   4875 	case WM_T_PCH_SPT:
   4876 	case WM_T_PCH_CNP:
   4877 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4878 		if (wm_phy_resetisblocked(sc) == false) {
   4879 			/*
   4880 			 * Gate automatic PHY configuration by hardware on
   4881 			 * non-managed 82579
   4882 			 */
   4883 			if ((sc->sc_type == WM_T_PCH2)
   4884 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4885 				== 0))
   4886 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4887 
   4888 			reg |= CTRL_PHY_RESET;
   4889 			phy_reset = 1;
   4890 		} else
   4891 			printf("XXX reset is blocked!!!\n");
   4892 		sc->phy.acquire(sc);
   4893 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4894 		/* Don't insert a completion barrier during reset */
   4895 		delay(20*1000);
   4896 		mutex_exit(sc->sc_ich_phymtx);
   4897 		break;
   4898 	case WM_T_82580:
   4899 	case WM_T_I350:
   4900 	case WM_T_I354:
   4901 	case WM_T_I210:
   4902 	case WM_T_I211:
   4903 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4904 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4905 			CSR_WRITE_FLUSH(sc);
   4906 		delay(5000);
   4907 		break;
   4908 	case WM_T_82542_2_0:
   4909 	case WM_T_82542_2_1:
   4910 	case WM_T_82543:
   4911 	case WM_T_82540:
   4912 	case WM_T_82545:
   4913 	case WM_T_82546:
   4914 	case WM_T_82571:
   4915 	case WM_T_82572:
   4916 	case WM_T_82573:
   4917 	case WM_T_82574:
   4918 	case WM_T_82575:
   4919 	case WM_T_82576:
   4920 	case WM_T_82583:
   4921 	default:
   4922 		/* Everything else can safely use the documented method. */
   4923 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4924 		break;
   4925 	}
   4926 
   4927 	/* Must release the MDIO ownership after MAC reset */
   4928 	switch (sc->sc_type) {
   4929 	case WM_T_82573:
   4930 	case WM_T_82574:
   4931 	case WM_T_82583:
   4932 		if (error == 0)
   4933 			wm_put_hw_semaphore_82573(sc);
   4934 		break;
   4935 	default:
   4936 		break;
   4937 	}
   4938 
   4939 	/* Set Phy Config Counter to 50msec */
   4940 	if (sc->sc_type == WM_T_PCH2) {
   4941 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4942 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4943 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4944 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4945 	}
   4946 
   4947 	if (phy_reset != 0)
   4948 		wm_get_cfg_done(sc);
   4949 
   4950 	/* Reload EEPROM */
   4951 	switch (sc->sc_type) {
   4952 	case WM_T_82542_2_0:
   4953 	case WM_T_82542_2_1:
   4954 	case WM_T_82543:
   4955 	case WM_T_82544:
   4956 		delay(10);
   4957 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4958 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4959 		CSR_WRITE_FLUSH(sc);
   4960 		delay(2000);
   4961 		break;
   4962 	case WM_T_82540:
   4963 	case WM_T_82545:
   4964 	case WM_T_82545_3:
   4965 	case WM_T_82546:
   4966 	case WM_T_82546_3:
   4967 		delay(5*1000);
   4968 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4969 		break;
   4970 	case WM_T_82541:
   4971 	case WM_T_82541_2:
   4972 	case WM_T_82547:
   4973 	case WM_T_82547_2:
   4974 		delay(20000);
   4975 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4976 		break;
   4977 	case WM_T_82571:
   4978 	case WM_T_82572:
   4979 	case WM_T_82573:
   4980 	case WM_T_82574:
   4981 	case WM_T_82583:
   4982 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4983 			delay(10);
   4984 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4985 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4986 			CSR_WRITE_FLUSH(sc);
   4987 		}
   4988 		/* check EECD_EE_AUTORD */
   4989 		wm_get_auto_rd_done(sc);
   4990 		/*
   4991 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4992 		 * is set.
   4993 		 */
   4994 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4995 		    || (sc->sc_type == WM_T_82583))
   4996 			delay(25*1000);
   4997 		break;
   4998 	case WM_T_82575:
   4999 	case WM_T_82576:
   5000 	case WM_T_82580:
   5001 	case WM_T_I350:
   5002 	case WM_T_I354:
   5003 	case WM_T_I210:
   5004 	case WM_T_I211:
   5005 	case WM_T_80003:
   5006 		/* check EECD_EE_AUTORD */
   5007 		wm_get_auto_rd_done(sc);
   5008 		break;
   5009 	case WM_T_ICH8:
   5010 	case WM_T_ICH9:
   5011 	case WM_T_ICH10:
   5012 	case WM_T_PCH:
   5013 	case WM_T_PCH2:
   5014 	case WM_T_PCH_LPT:
   5015 	case WM_T_PCH_SPT:
   5016 	case WM_T_PCH_CNP:
   5017 		break;
   5018 	default:
   5019 		panic("%s: unknown type\n", __func__);
   5020 	}
   5021 
   5022 	/* Check whether EEPROM is present or not */
   5023 	switch (sc->sc_type) {
   5024 	case WM_T_82575:
   5025 	case WM_T_82576:
   5026 	case WM_T_82580:
   5027 	case WM_T_I350:
   5028 	case WM_T_I354:
   5029 	case WM_T_ICH8:
   5030 	case WM_T_ICH9:
   5031 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5032 			/* Not found */
   5033 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5034 			if (sc->sc_type == WM_T_82575)
   5035 				wm_reset_init_script_82575(sc);
   5036 		}
   5037 		break;
   5038 	default:
   5039 		break;
   5040 	}
   5041 
   5042 	if (phy_reset != 0)
   5043 		wm_phy_post_reset(sc);
   5044 
   5045 	if ((sc->sc_type == WM_T_82580)
   5046 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5047 		/* Clear global device reset status bit */
   5048 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5049 	}
   5050 
   5051 	/* Clear any pending interrupt events. */
   5052 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5053 	reg = CSR_READ(sc, WMREG_ICR);
   5054 	if (wm_is_using_msix(sc)) {
   5055 		if (sc->sc_type != WM_T_82574) {
   5056 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5057 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5058 		} else
   5059 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5060 	}
   5061 
   5062 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5063 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5064 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5065 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5066 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5067 		reg |= KABGTXD_BGSQLBIAS;
   5068 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5069 	}
   5070 
   5071 	/* Reload sc_ctrl */
   5072 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5073 
   5074 	wm_set_eee(sc);
   5075 
   5076 	/*
   5077 	 * For PCH, this write will make sure that any noise will be detected
   5078 	 * as a CRC error and be dropped rather than show up as a bad packet
   5079 	 * to the DMA engine
   5080 	 */
   5081 	if (sc->sc_type == WM_T_PCH)
   5082 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5083 
   5084 	if (sc->sc_type >= WM_T_82544)
   5085 		CSR_WRITE(sc, WMREG_WUC, 0);
   5086 
   5087 	if (sc->sc_type < WM_T_82575)
   5088 		wm_disable_aspm(sc); /* Workaround for some chips */
   5089 
   5090 	wm_reset_mdicnfg_82580(sc);
   5091 
   5092 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5093 		wm_pll_workaround_i210(sc);
   5094 
   5095 	if (sc->sc_type == WM_T_80003) {
   5096 		/* Default to TRUE to enable the MDIC W/A */
   5097 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5098 
   5099 		rv = wm_kmrn_readreg(sc,
   5100 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5101 		if (rv == 0) {
   5102 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5103 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5104 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5105 			else
   5106 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5107 		}
   5108 	}
   5109 }
   5110 
   5111 /*
   5112  * wm_add_rxbuf:
   5113  *
    5114  *	Add a receive buffer to the indicated descriptor.
   5115  */
   5116 static int
   5117 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5118 {
   5119 	struct wm_softc *sc = rxq->rxq_sc;
   5120 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5121 	struct mbuf *m;
   5122 	int error;
   5123 
   5124 	KASSERT(mutex_owned(rxq->rxq_lock));
   5125 
   5126 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5127 	if (m == NULL)
   5128 		return ENOBUFS;
   5129 
   5130 	MCLGET(m, M_DONTWAIT);
   5131 	if ((m->m_flags & M_EXT) == 0) {
   5132 		m_freem(m);
   5133 		return ENOBUFS;
   5134 	}
   5135 
   5136 	if (rxs->rxs_mbuf != NULL)
   5137 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5138 
   5139 	rxs->rxs_mbuf = m;
   5140 
   5141 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5142 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5143 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5144 	if (error) {
   5145 		/* XXX XXX XXX */
   5146 		aprint_error_dev(sc->sc_dev,
   5147 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5148 		panic("wm_add_rxbuf");
   5149 	}
   5150 
   5151 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5152 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5153 
   5154 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5155 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5156 			wm_init_rxdesc(rxq, idx);
   5157 	} else
   5158 		wm_init_rxdesc(rxq, idx);
   5159 
   5160 	return 0;
   5161 }
   5162 
   5163 /*
   5164  * wm_rxdrain:
   5165  *
   5166  *	Drain the receive queue.
   5167  */
   5168 static void
   5169 wm_rxdrain(struct wm_rxqueue *rxq)
   5170 {
   5171 	struct wm_softc *sc = rxq->rxq_sc;
   5172 	struct wm_rxsoft *rxs;
   5173 	int i;
   5174 
   5175 	KASSERT(mutex_owned(rxq->rxq_lock));
   5176 
   5177 	for (i = 0; i < WM_NRXDESC; i++) {
   5178 		rxs = &rxq->rxq_soft[i];
   5179 		if (rxs->rxs_mbuf != NULL) {
   5180 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5181 			m_freem(rxs->rxs_mbuf);
   5182 			rxs->rxs_mbuf = NULL;
   5183 		}
   5184 	}
   5185 }
   5186 
   5187 /*
   5188  * Setup registers for RSS.
   5189  *
    5190  * XXX no VMDq support yet
   5191  */
   5192 static void
   5193 wm_init_rss(struct wm_softc *sc)
   5194 {
   5195 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5196 	int i;
   5197 
   5198 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5199 
   5200 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5201 		int qid, reta_ent;
   5202 
   5203 		qid  = i % sc->sc_nqueues;
   5204 		switch (sc->sc_type) {
   5205 		case WM_T_82574:
   5206 			reta_ent = __SHIFTIN(qid,
   5207 			    RETA_ENT_QINDEX_MASK_82574);
   5208 			break;
   5209 		case WM_T_82575:
   5210 			reta_ent = __SHIFTIN(qid,
   5211 			    RETA_ENT_QINDEX1_MASK_82575);
   5212 			break;
   5213 		default:
   5214 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5215 			break;
   5216 		}
   5217 
   5218 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5219 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5220 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5221 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5222 	}
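         	/*
         	 * With qid = i % sc_nqueues above, the redirection table
         	 * spreads RSS hash values across the Rx queues round-robin.
         	 */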
   5223 
   5224 	rss_getkey((uint8_t *)rss_key);
   5225 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5226 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
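         	/* The RSS key is RSSRK_NUM_REGS * 4 bytes, 32 bits per register */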
   5227 
   5228 	if (sc->sc_type == WM_T_82574)
   5229 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5230 	else
   5231 		mrqc = MRQC_ENABLE_RSS_MQ;
   5232 
   5233 	/*
   5234 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5235 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5236 	 */
   5237 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5238 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5239 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5240 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5241 
   5242 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5243 }
   5244 
   5245 /*
    5246  * Adjust the TX and RX queue numbers which the system actually uses.
    5247  *
    5248  * The numbers are affected by the following parameters:
    5249  *     - The number of hardware queues
   5250  *     - The number of MSI-X vectors (= "nvectors" argument)
   5251  *     - ncpu
   5252  */
   5253 static void
   5254 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5255 {
   5256 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5257 
   5258 	if (nvectors < 2) {
   5259 		sc->sc_nqueues = 1;
   5260 		return;
   5261 	}
   5262 
   5263 	switch (sc->sc_type) {
   5264 	case WM_T_82572:
   5265 		hw_ntxqueues = 2;
   5266 		hw_nrxqueues = 2;
   5267 		break;
   5268 	case WM_T_82574:
   5269 		hw_ntxqueues = 2;
   5270 		hw_nrxqueues = 2;
   5271 		break;
   5272 	case WM_T_82575:
   5273 		hw_ntxqueues = 4;
   5274 		hw_nrxqueues = 4;
   5275 		break;
   5276 	case WM_T_82576:
   5277 		hw_ntxqueues = 16;
   5278 		hw_nrxqueues = 16;
   5279 		break;
   5280 	case WM_T_82580:
   5281 	case WM_T_I350:
   5282 	case WM_T_I354:
   5283 		hw_ntxqueues = 8;
   5284 		hw_nrxqueues = 8;
   5285 		break;
   5286 	case WM_T_I210:
   5287 		hw_ntxqueues = 4;
   5288 		hw_nrxqueues = 4;
   5289 		break;
   5290 	case WM_T_I211:
   5291 		hw_ntxqueues = 2;
   5292 		hw_nrxqueues = 2;
   5293 		break;
   5294 		/*
    5295 		 * As the following Ethernet controllers do not support
    5296 		 * MSI-X, this driver does not use multiqueue on them:
   5297 		 *     - WM_T_80003
   5298 		 *     - WM_T_ICH8
   5299 		 *     - WM_T_ICH9
   5300 		 *     - WM_T_ICH10
   5301 		 *     - WM_T_PCH
   5302 		 *     - WM_T_PCH2
   5303 		 *     - WM_T_PCH_LPT
   5304 		 */
   5305 	default:
   5306 		hw_ntxqueues = 1;
   5307 		hw_nrxqueues = 1;
   5308 		break;
   5309 	}
   5310 
   5311 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5312 
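         	/*
         	 * Example: an 82576 (16 hardware queues) probed with
         	 * nvectors == 5 on an 8-CPU machine ends up with
         	 * sc_nqueues == 4 after the two limits below
         	 * (first nvectors - 1, then ncpu).
         	 */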
   5313 	/*
    5314 	 * Since using more queues than MSI-X vectors cannot improve
    5315 	 * scaling, limit the number of queues actually used.
   5316 	 */
   5317 	if (nvectors < hw_nqueues + 1)
   5318 		sc->sc_nqueues = nvectors - 1;
   5319 	else
   5320 		sc->sc_nqueues = hw_nqueues;
   5321 
   5322 	/*
    5323 	 * Since using more queues than CPUs cannot improve scaling,
    5324 	 * limit the number of queues actually used.
   5325 	 */
   5326 	if (ncpu < sc->sc_nqueues)
   5327 		sc->sc_nqueues = ncpu;
   5328 }
   5329 
   5330 static inline bool
   5331 wm_is_using_msix(struct wm_softc *sc)
   5332 {
   5333 
   5334 	return (sc->sc_nintrs > 1);
   5335 }
   5336 
   5337 static inline bool
   5338 wm_is_using_multiqueue(struct wm_softc *sc)
   5339 {
   5340 
   5341 	return (sc->sc_nqueues > 1);
   5342 }
   5343 
   5344 static int
   5345 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5346 {
   5347 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5348 	wmq->wmq_id = qidx;
   5349 	wmq->wmq_intr_idx = intr_idx;
   5350 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5351 #ifdef WM_MPSAFE
   5352 	    | SOFTINT_MPSAFE
   5353 #endif
   5354 	    , wm_handle_queue, wmq);
   5355 	if (wmq->wmq_si != NULL)
   5356 		return 0;
   5357 
   5358 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5359 	    wmq->wmq_id);
   5360 
   5361 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5362 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5363 	return ENOMEM;
   5364 }
   5365 
   5366 /*
    5367  * Both single-interrupt MSI and INTx can use this function.
   5368  */
   5369 static int
   5370 wm_setup_legacy(struct wm_softc *sc)
   5371 {
   5372 	pci_chipset_tag_t pc = sc->sc_pc;
   5373 	const char *intrstr = NULL;
   5374 	char intrbuf[PCI_INTRSTR_LEN];
   5375 	int error;
   5376 
   5377 	error = wm_alloc_txrx_queues(sc);
   5378 	if (error) {
   5379 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5380 		    error);
   5381 		return ENOMEM;
   5382 	}
   5383 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5384 	    sizeof(intrbuf));
   5385 #ifdef WM_MPSAFE
   5386 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5387 #endif
   5388 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5389 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5390 	if (sc->sc_ihs[0] == NULL) {
   5391 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5392 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5393 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5394 		return ENOMEM;
   5395 	}
   5396 
   5397 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5398 	sc->sc_nintrs = 1;
   5399 
   5400 	return wm_softint_establish(sc, 0, 0);
   5401 }
   5402 
   5403 static int
   5404 wm_setup_msix(struct wm_softc *sc)
   5405 {
   5406 	void *vih;
   5407 	kcpuset_t *affinity;
   5408 	int qidx, error, intr_idx, txrx_established;
   5409 	pci_chipset_tag_t pc = sc->sc_pc;
   5410 	const char *intrstr = NULL;
   5411 	char intrbuf[PCI_INTRSTR_LEN];
   5412 	char intr_xname[INTRDEVNAMEBUF];
   5413 
   5414 	if (sc->sc_nqueues < ncpu) {
   5415 		/*
    5416 		 * To avoid contention with other devices' interrupts, the
    5417 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   5418 		 */
   5419 		sc->sc_affinity_offset = 1;
   5420 	} else {
   5421 		/*
    5422 		 * In this case the device uses all CPUs, so for readability
    5423 		 * we unify affinity cpu_index with the MSI-X vector number.
   5424 		 */
   5425 		sc->sc_affinity_offset = 0;
   5426 	}
   5427 
   5428 	error = wm_alloc_txrx_queues(sc);
   5429 	if (error) {
   5430 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5431 		    error);
   5432 		return ENOMEM;
   5433 	}
   5434 
   5435 	kcpuset_create(&affinity, false);
   5436 	intr_idx = 0;
   5437 
   5438 	/*
   5439 	 * TX and RX
   5440 	 */
   5441 	txrx_established = 0;
   5442 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5443 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5444 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
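         		/*
         		 * E.g. 4 queues on an 8-CPU machine with offset 1: queue
         		 * interrupts land on CPU1..CPU4, leaving CPU0 to others.
         		 */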
   5445 
   5446 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5447 		    sizeof(intrbuf));
   5448 #ifdef WM_MPSAFE
   5449 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5450 		    PCI_INTR_MPSAFE, true);
   5451 #endif
   5452 		memset(intr_xname, 0, sizeof(intr_xname));
   5453 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5454 		    device_xname(sc->sc_dev), qidx);
   5455 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5456 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5457 		if (vih == NULL) {
   5458 			aprint_error_dev(sc->sc_dev,
    5459 			    "unable to establish MSI-X (for TX and RX)%s%s\n",
   5460 			    intrstr ? " at " : "",
   5461 			    intrstr ? intrstr : "");
   5462 
   5463 			goto fail;
   5464 		}
   5465 		kcpuset_zero(affinity);
   5466 		/* Round-robin affinity */
   5467 		kcpuset_set(affinity, affinity_to);
   5468 		error = interrupt_distribute(vih, affinity, NULL);
   5469 		if (error == 0) {
   5470 			aprint_normal_dev(sc->sc_dev,
   5471 			    "for TX and RX interrupting at %s affinity to %u\n",
   5472 			    intrstr, affinity_to);
   5473 		} else {
   5474 			aprint_normal_dev(sc->sc_dev,
   5475 			    "for TX and RX interrupting at %s\n", intrstr);
   5476 		}
   5477 		sc->sc_ihs[intr_idx] = vih;
   5478 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5479 			goto fail;
   5480 		txrx_established++;
   5481 		intr_idx++;
   5482 	}
   5483 
   5484 	/* LINK */
   5485 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5486 	    sizeof(intrbuf));
   5487 #ifdef WM_MPSAFE
   5488 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5489 #endif
   5490 	memset(intr_xname, 0, sizeof(intr_xname));
   5491 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5492 	    device_xname(sc->sc_dev));
   5493 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5494 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5495 	if (vih == NULL) {
   5496 		aprint_error_dev(sc->sc_dev,
    5497 		    "unable to establish MSI-X (for LINK)%s%s\n",
   5498 		    intrstr ? " at " : "",
   5499 		    intrstr ? intrstr : "");
   5500 
   5501 		goto fail;
   5502 	}
   5503 	/* Keep default affinity to LINK interrupt */
   5504 	aprint_normal_dev(sc->sc_dev,
   5505 	    "for LINK interrupting at %s\n", intrstr);
   5506 	sc->sc_ihs[intr_idx] = vih;
   5507 	sc->sc_link_intr_idx = intr_idx;
   5508 
   5509 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5510 	kcpuset_destroy(affinity);
   5511 	return 0;
   5512 
   5513  fail:
   5514 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5515 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5516 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5517 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5518 	}
   5519 
   5520 	kcpuset_destroy(affinity);
   5521 	return ENOMEM;
   5522 }
   5523 
   5524 static void
   5525 wm_unset_stopping_flags(struct wm_softc *sc)
   5526 {
   5527 	int i;
   5528 
   5529 	KASSERT(WM_CORE_LOCKED(sc));
   5530 
   5531 	/* Must unset stopping flags in ascending order. */
   5532 	for (i = 0; i < sc->sc_nqueues; i++) {
   5533 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5534 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5535 
   5536 		mutex_enter(txq->txq_lock);
   5537 		txq->txq_stopping = false;
   5538 		mutex_exit(txq->txq_lock);
   5539 
   5540 		mutex_enter(rxq->rxq_lock);
   5541 		rxq->rxq_stopping = false;
   5542 		mutex_exit(rxq->rxq_lock);
   5543 	}
   5544 
   5545 	sc->sc_core_stopping = false;
   5546 }
   5547 
   5548 static void
   5549 wm_set_stopping_flags(struct wm_softc *sc)
   5550 {
   5551 	int i;
   5552 
   5553 	KASSERT(WM_CORE_LOCKED(sc));
   5554 
   5555 	sc->sc_core_stopping = true;
   5556 
   5557 	/* Must set stopping flags in ascending order. */
   5558 	for (i = 0; i < sc->sc_nqueues; i++) {
   5559 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5560 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5561 
   5562 		mutex_enter(rxq->rxq_lock);
   5563 		rxq->rxq_stopping = true;
   5564 		mutex_exit(rxq->rxq_lock);
   5565 
   5566 		mutex_enter(txq->txq_lock);
   5567 		txq->txq_stopping = true;
   5568 		mutex_exit(txq->txq_lock);
   5569 	}
   5570 }
   5571 
   5572 /*
   5573  * Write interrupt interval value to ITR or EITR
   5574  */
   5575 static void
   5576 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5577 {
   5578 
   5579 	if (!wmq->wmq_set_itr)
   5580 		return;
   5581 
   5582 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5583 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5584 
   5585 		/*
    5586 		 * 82575 doesn't have the CNT_INGR field,
    5587 		 * so overwrite the counter field in software.
   5588 		 */
   5589 		if (sc->sc_type == WM_T_82575)
   5590 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5591 		else
   5592 			eitr |= EITR_CNT_INGR;
   5593 
   5594 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5595 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5596 		/*
    5597 		 * 82574 has both ITR and EITR. Set EITR when we use
    5598 		 * the multiqueue function with MSI-X.
   5599 		 */
   5600 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5601 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5602 	} else {
   5603 		KASSERT(wmq->wmq_id == 0);
   5604 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5605 	}
   5606 
   5607 	wmq->wmq_set_itr = false;
   5608 }
   5609 
   5610 /*
   5611  * TODO
    5612  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5613  * however, it does not fit wm(4), so AIM remains disabled until we
    5614  * find an appropriate ITR calculation.
   5615  */
   5616 /*
    5617  * Calculate the interrupt interval value that will be written to the
    5618  * register by wm_itrs_writereg(). This function does not write ITR/EITR.
   5619  */
   5620 static void
   5621 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5622 {
   5623 #ifdef NOTYET
   5624 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5625 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5626 	uint32_t avg_size = 0;
   5627 	uint32_t new_itr;
   5628 
   5629 	if (rxq->rxq_packets)
   5630 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5631 	if (txq->txq_packets)
   5632 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5633 
   5634 	if (avg_size == 0) {
   5635 		new_itr = 450; /* restore default value */
   5636 		goto out;
   5637 	}
   5638 
   5639 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5640 	avg_size += 24;
   5641 
   5642 	/* Don't starve jumbo frames */
   5643 	avg_size = uimin(avg_size, 3000);
   5644 
   5645 	/* Give a little boost to mid-size frames */
   5646 	if ((avg_size > 300) && (avg_size < 1200))
   5647 		new_itr = avg_size / 3;
   5648 	else
   5649 		new_itr = avg_size / 2;
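         	/*
         	 * E.g. an average frame of 576 bytes becomes 600 with the
         	 * 24-byte overhead and takes the mid-size branch: 600 / 3 = 200.
         	 */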
   5650 
   5651 out:
   5652 	/*
    5653 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5654 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5655 	 */
   5656 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5657 		new_itr *= 4;
   5658 
   5659 	if (new_itr != wmq->wmq_itr) {
   5660 		wmq->wmq_itr = new_itr;
   5661 		wmq->wmq_set_itr = true;
   5662 	} else
   5663 		wmq->wmq_set_itr = false;
   5664 
   5665 	rxq->rxq_packets = 0;
   5666 	rxq->rxq_bytes = 0;
   5667 	txq->txq_packets = 0;
   5668 	txq->txq_bytes = 0;
   5669 #endif
   5670 }
   5671 
   5672 /*
   5673  * wm_init:		[ifnet interface function]
   5674  *
   5675  *	Initialize the interface.
   5676  */
   5677 static int
   5678 wm_init(struct ifnet *ifp)
   5679 {
   5680 	struct wm_softc *sc = ifp->if_softc;
   5681 	int ret;
   5682 
   5683 	WM_CORE_LOCK(sc);
   5684 	ret = wm_init_locked(ifp);
   5685 	WM_CORE_UNLOCK(sc);
   5686 
   5687 	return ret;
   5688 }
   5689 
   5690 static int
   5691 wm_init_locked(struct ifnet *ifp)
   5692 {
   5693 	struct wm_softc *sc = ifp->if_softc;
   5694 	struct ethercom *ec = &sc->sc_ethercom;
   5695 	int i, j, trynum, error = 0;
   5696 	uint32_t reg;
   5697 
   5698 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5699 		device_xname(sc->sc_dev), __func__));
   5700 	KASSERT(WM_CORE_LOCKED(sc));
   5701 
   5702 	/*
    5703 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5704 	 * There is a small but measurable benefit to avoiding the adjustment
   5705 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5706 	 * on such platforms.  One possibility is that the DMA itself is
   5707 	 * slightly more efficient if the front of the entire packet (instead
   5708 	 * of the front of the headers) is aligned.
   5709 	 *
   5710 	 * Note we must always set align_tweak to 0 if we are using
   5711 	 * jumbo frames.
   5712 	 */
   5713 #ifdef __NO_STRICT_ALIGNMENT
   5714 	sc->sc_align_tweak = 0;
   5715 #else
   5716 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5717 		sc->sc_align_tweak = 0;
   5718 	else
   5719 		sc->sc_align_tweak = 2;
   5720 #endif /* __NO_STRICT_ALIGNMENT */
   5721 
   5722 	/* Cancel any pending I/O. */
   5723 	wm_stop_locked(ifp, 0);
   5724 
   5725 	/* Update statistics before reset */
   5726 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5727 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5728 
   5729 	/* PCH_SPT hardware workaround */
   5730 	if (sc->sc_type == WM_T_PCH_SPT)
   5731 		wm_flush_desc_rings(sc);
   5732 
   5733 	/* Reset the chip to a known state. */
   5734 	wm_reset(sc);
   5735 
   5736 	/*
    5737 	 * AMT-based hardware can now take control from firmware.
    5738 	 * Do this after reset.
   5739 	 */
   5740 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5741 		wm_get_hw_control(sc);
   5742 
   5743 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5744 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5745 		wm_legacy_irq_quirk_spt(sc);
   5746 
   5747 	/* Init hardware bits */
   5748 	wm_initialize_hardware_bits(sc);
   5749 
   5750 	/* Reset the PHY. */
   5751 	if (sc->sc_flags & WM_F_HAS_MII)
   5752 		wm_gmii_reset(sc);
   5753 
   5754 	if (sc->sc_type >= WM_T_ICH8) {
   5755 		reg = CSR_READ(sc, WMREG_GCR);
   5756 		/*
   5757 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5758 		 * default after reset.
   5759 		 */
   5760 		if (sc->sc_type == WM_T_ICH8)
   5761 			reg |= GCR_NO_SNOOP_ALL;
   5762 		else
   5763 			reg &= ~GCR_NO_SNOOP_ALL;
   5764 		CSR_WRITE(sc, WMREG_GCR, reg);
   5765 	}
   5766 	if ((sc->sc_type >= WM_T_ICH8)
   5767 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5768 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5769 
   5770 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5771 		reg |= CTRL_EXT_RO_DIS;
   5772 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5773 	}
   5774 
   5775 	/* Calculate (E)ITR value */
   5776 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5777 		/*
   5778 		 * For NEWQUEUE's EITR (except for 82575).
    5779 		 * 82575's EITR should be set to the same throttling value
    5780 		 * as other old controllers' ITR because the interrupts/sec
    5781 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5782 		 *
    5783 		 * 82574's EITR should be set to the same value as ITR.
    5784 		 *
    5785 		 * For N interrupts/sec, set this value to:
    5786 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5787 		 */
   5788 		sc->sc_itr_init = 450;
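         		/* I.e. 1,000,000 / 450 is about 2222 interrupts/sec */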
   5789 	} else if (sc->sc_type >= WM_T_82543) {
   5790 		/*
   5791 		 * Set up the interrupt throttling register (units of 256ns)
   5792 		 * Note that a footnote in Intel's documentation says this
   5793 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    5794 		 * or 10Mbit mode.  Empirically, this appears to be true
    5795 		 * also for the 1024ns units of the other
   5796 		 * interrupt-related timer registers -- so, really, we ought
   5797 		 * to divide this value by 4 when the link speed is low.
   5798 		 *
   5799 		 * XXX implement this division at link speed change!
   5800 		 */
   5801 
   5802 		/*
   5803 		 * For N interrupts/sec, set this value to:
   5804 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5805 		 * absolute and packet timer values to this value
   5806 		 * divided by 4 to get "simple timer" behavior.
   5807 		 */
   5808 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5809 	}
   5810 
   5811 	error = wm_init_txrx_queues(sc);
   5812 	if (error)
   5813 		goto out;
   5814 
   5815 	/* Clear out the VLAN table -- we don't use it (yet). */
   5816 	CSR_WRITE(sc, WMREG_VET, 0);
   5817 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5818 		trynum = 10; /* Due to hw errata */
   5819 	else
   5820 		trynum = 1;
   5821 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5822 		for (j = 0; j < trynum; j++)
   5823 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5824 
   5825 	/*
   5826 	 * Set up flow-control parameters.
   5827 	 *
   5828 	 * XXX Values could probably stand some tuning.
   5829 	 */
   5830 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5831 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5832 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5833 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5834 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5835 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5836 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5837 	}
   5838 
   5839 	sc->sc_fcrtl = FCRTL_DFLT;
   5840 	if (sc->sc_type < WM_T_82543) {
   5841 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5842 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5843 	} else {
   5844 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5845 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5846 	}
   5847 
   5848 	if (sc->sc_type == WM_T_80003)
   5849 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5850 	else
   5851 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5852 
   5853 	/* Writes the control register. */
   5854 	wm_set_vlan(sc);
   5855 
   5856 	if (sc->sc_flags & WM_F_HAS_MII) {
   5857 		uint16_t kmreg;
   5858 
   5859 		switch (sc->sc_type) {
   5860 		case WM_T_80003:
   5861 		case WM_T_ICH8:
   5862 		case WM_T_ICH9:
   5863 		case WM_T_ICH10:
   5864 		case WM_T_PCH:
   5865 		case WM_T_PCH2:
   5866 		case WM_T_PCH_LPT:
   5867 		case WM_T_PCH_SPT:
   5868 		case WM_T_PCH_CNP:
   5869 			/*
   5870 			 * Set the mac to wait the maximum time between each
   5871 			 * iteration and increase the max iterations when
   5872 			 * polling the phy; this fixes erroneous timeouts at
   5873 			 * 10Mbps.
   5874 			 */
   5875 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5876 			    0xFFFF);
   5877 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5878 			    &kmreg);
   5879 			kmreg |= 0x3F;
   5880 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5881 			    kmreg);
   5882 			break;
   5883 		default:
   5884 			break;
   5885 		}
   5886 
   5887 		if (sc->sc_type == WM_T_80003) {
   5888 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5889 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5890 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5891 
   5892 			/* Bypass RX and TX FIFO's */
   5893 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5894 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5895 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5896 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5897 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5898 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5899 		}
   5900 	}
   5901 #if 0
   5902 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5903 #endif
   5904 
   5905 	/* Set up checksum offload parameters. */
   5906 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5907 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5908 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5909 		reg |= RXCSUM_IPOFL;
   5910 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5911 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5912 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5913 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5914 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5915 
   5916 	/* Set registers about MSI-X */
   5917 	if (wm_is_using_msix(sc)) {
   5918 		uint32_t ivar;
   5919 		struct wm_queue *wmq;
   5920 		int qid, qintr_idx;
   5921 
   5922 		if (sc->sc_type == WM_T_82575) {
   5923 			/* Interrupt control */
   5924 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5925 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5926 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5927 
   5928 			/* TX and RX */
   5929 			for (i = 0; i < sc->sc_nqueues; i++) {
   5930 				wmq = &sc->sc_queue[i];
   5931 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5932 				    EITR_TX_QUEUE(wmq->wmq_id)
   5933 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5934 			}
   5935 			/* Link status */
   5936 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5937 			    EITR_OTHER);
   5938 		} else if (sc->sc_type == WM_T_82574) {
   5939 			/* Interrupt control */
   5940 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5941 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5942 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5943 
   5944 			/*
   5945 			 * Workaround issue with spurious interrupts
   5946 			 * in MSI-X mode.
    5947 			 * At wm_initialize_hardware_bits(), sc_nintrs was not
    5948 			 * yet initialized, so re-initialize WMREG_RFCTL here.
   5949 			 */
   5950 			reg = CSR_READ(sc, WMREG_RFCTL);
   5951 			reg |= WMREG_RFCTL_ACKDIS;
   5952 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5953 
   5954 			ivar = 0;
   5955 			/* TX and RX */
   5956 			for (i = 0; i < sc->sc_nqueues; i++) {
   5957 				wmq = &sc->sc_queue[i];
   5958 				qid = wmq->wmq_id;
   5959 				qintr_idx = wmq->wmq_intr_idx;
   5960 
   5961 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5962 				    IVAR_TX_MASK_Q_82574(qid));
   5963 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5964 				    IVAR_RX_MASK_Q_82574(qid));
   5965 			}
   5966 			/* Link status */
   5967 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5968 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5969 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5970 		} else {
   5971 			/* Interrupt control */
   5972 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5973 			    | GPIE_EIAME | GPIE_PBA);
   5974 
   5975 			switch (sc->sc_type) {
   5976 			case WM_T_82580:
   5977 			case WM_T_I350:
   5978 			case WM_T_I354:
   5979 			case WM_T_I210:
   5980 			case WM_T_I211:
   5981 				/* TX and RX */
   5982 				for (i = 0; i < sc->sc_nqueues; i++) {
   5983 					wmq = &sc->sc_queue[i];
   5984 					qid = wmq->wmq_id;
   5985 					qintr_idx = wmq->wmq_intr_idx;
   5986 
   5987 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5988 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5989 					ivar |= __SHIFTIN((qintr_idx
   5990 						| IVAR_VALID),
   5991 					    IVAR_TX_MASK_Q(qid));
   5992 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5993 					ivar |= __SHIFTIN((qintr_idx
   5994 						| IVAR_VALID),
   5995 					    IVAR_RX_MASK_Q(qid));
   5996 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5997 				}
   5998 				break;
   5999 			case WM_T_82576:
   6000 				/* TX and RX */
   6001 				for (i = 0; i < sc->sc_nqueues; i++) {
   6002 					wmq = &sc->sc_queue[i];
   6003 					qid = wmq->wmq_id;
   6004 					qintr_idx = wmq->wmq_intr_idx;
   6005 
   6006 					ivar = CSR_READ(sc,
   6007 					    WMREG_IVAR_Q_82576(qid));
   6008 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6009 					ivar |= __SHIFTIN((qintr_idx
   6010 						| IVAR_VALID),
   6011 					    IVAR_TX_MASK_Q_82576(qid));
   6012 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6013 					ivar |= __SHIFTIN((qintr_idx
   6014 						| IVAR_VALID),
   6015 					    IVAR_RX_MASK_Q_82576(qid));
   6016 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6017 					    ivar);
   6018 				}
   6019 				break;
   6020 			default:
   6021 				break;
   6022 			}
   6023 
   6024 			/* Link status */
   6025 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6026 			    IVAR_MISC_OTHER);
   6027 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6028 		}
   6029 
   6030 		if (wm_is_using_multiqueue(sc)) {
   6031 			wm_init_rss(sc);
   6032 
   6033 			/*
    6034 			 * NOTE: Receive Full-Packet Checksum Offload is
    6035 			 * mutually exclusive with Multiqueue. However, this
    6036 			 * is not the same as TCP/IP checksum offload, which
    6037 			 * still works.
    6038 			 */
   6039 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6040 			reg |= RXCSUM_PCSD;
   6041 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6042 		}
   6043 	}
   6044 
   6045 	/* Set up the interrupt registers. */
   6046 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6047 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6048 	    ICR_RXO | ICR_RXT0;
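         	/*
         	 * TXDW: Tx descriptor written back, LSC: link status change,
         	 * RXSEQ: Rx sequence error, RXDMT0: Rx descriptor minimum
         	 * threshold reached, RXO: Rx overrun, RXT0: Rx timer interrupt.
         	 */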
   6049 	if (wm_is_using_msix(sc)) {
   6050 		uint32_t mask;
   6051 		struct wm_queue *wmq;
   6052 
   6053 		switch (sc->sc_type) {
   6054 		case WM_T_82574:
   6055 			mask = 0;
   6056 			for (i = 0; i < sc->sc_nqueues; i++) {
   6057 				wmq = &sc->sc_queue[i];
   6058 				mask |= ICR_TXQ(wmq->wmq_id);
   6059 				mask |= ICR_RXQ(wmq->wmq_id);
   6060 			}
   6061 			mask |= ICR_OTHER;
   6062 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6063 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6064 			break;
   6065 		default:
   6066 			if (sc->sc_type == WM_T_82575) {
   6067 				mask = 0;
   6068 				for (i = 0; i < sc->sc_nqueues; i++) {
   6069 					wmq = &sc->sc_queue[i];
   6070 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6071 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6072 				}
   6073 				mask |= EITR_OTHER;
   6074 			} else {
   6075 				mask = 0;
   6076 				for (i = 0; i < sc->sc_nqueues; i++) {
   6077 					wmq = &sc->sc_queue[i];
   6078 					mask |= 1 << wmq->wmq_intr_idx;
   6079 				}
   6080 				mask |= 1 << sc->sc_link_intr_idx;
   6081 			}
   6082 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6083 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6084 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6085 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6086 			break;
   6087 		}
   6088 	} else
   6089 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6090 
   6091 	/* Set up the inter-packet gap. */
   6092 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6093 
   6094 	if (sc->sc_type >= WM_T_82543) {
   6095 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6096 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6097 			wm_itrs_writereg(sc, wmq);
   6098 		}
   6099 		/*
    6100 		 * Link interrupts occur much less frequently than
    6101 		 * TX and RX interrupts, so we don't tune the
    6102 		 * EITR(WM_MSIX_LINKINTR_IDX) value like
   6103 		 * FreeBSD's if_igb.
   6104 		 */
   6105 	}
   6106 
   6107 	/* Set the VLAN ethernetype. */
   6108 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6109 
   6110 	/*
   6111 	 * Set up the transmit control register; we start out with
    6112 	 * a collision distance suitable for FDX, but update it when
   6113 	 * we resolve the media type.
   6114 	 */
   6115 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6116 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6117 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6118 	if (sc->sc_type >= WM_T_82571)
   6119 		sc->sc_tctl |= TCTL_MULR;
   6120 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6121 
   6122 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6123 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6124 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6125 	}
   6126 
   6127 	if (sc->sc_type == WM_T_80003) {
   6128 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6129 		reg &= ~TCTL_EXT_GCEX_MASK;
   6130 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6131 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6132 	}
   6133 
   6134 	/* Set the media. */
   6135 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6136 		goto out;
   6137 
   6138 	/* Configure for OS presence */
   6139 	wm_init_manageability(sc);
   6140 
   6141 	/*
   6142 	 * Set up the receive control register; we actually program the
   6143 	 * register when we set the receive filter. Use multicast address
   6144 	 * offset type 0.
   6145 	 *
   6146 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6147 	 * don't enable that feature.
   6148 	 */
   6149 	sc->sc_mchash_type = 0;
   6150 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6151 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
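         	/*
         	 * LBM_NONE: no loopback, RDMTS_1_2: Rx descriptor minimum
         	 * threshold of 1/2 the ring, DPF: discard pause frames,
         	 * RCTL_MO: multicast offset type (0, set just above).
         	 */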
   6152 
   6153 	/* 82574 use one buffer extended Rx descriptor. */
   6154 	if (sc->sc_type == WM_T_82574)
   6155 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6156 
   6157 	/*
   6158 	 * The I350 has a bug where it always strips the CRC whether
    6159 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   6160 	 */
   6161 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6162 	    || (sc->sc_type == WM_T_I210))
   6163 		sc->sc_rctl |= RCTL_SECRC;
   6164 
   6165 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6166 	    && (ifp->if_mtu > ETHERMTU)) {
   6167 		sc->sc_rctl |= RCTL_LPE;
   6168 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6169 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6170 	}
   6171 
   6172 	if (MCLBYTES == 2048)
   6173 		sc->sc_rctl |= RCTL_2k;
   6174 	else {
   6175 		if (sc->sc_type >= WM_T_82543) {
   6176 			switch (MCLBYTES) {
   6177 			case 4096:
   6178 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6179 				break;
   6180 			case 8192:
   6181 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6182 				break;
   6183 			case 16384:
   6184 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6185 				break;
   6186 			default:
   6187 				panic("wm_init: MCLBYTES %d unsupported",
   6188 				    MCLBYTES);
   6189 				break;
   6190 			}
   6191 		} else
   6192 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6193 	}
   6194 
   6195 	/* Enable ECC */
   6196 	switch (sc->sc_type) {
   6197 	case WM_T_82571:
   6198 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6199 		reg |= PBA_ECC_CORR_EN;
   6200 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6201 		break;
   6202 	case WM_T_PCH_LPT:
   6203 	case WM_T_PCH_SPT:
   6204 	case WM_T_PCH_CNP:
   6205 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6206 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6207 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6208 
   6209 		sc->sc_ctrl |= CTRL_MEHE;
   6210 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6211 		break;
   6212 	default:
   6213 		break;
   6214 	}
   6215 
   6216 	/*
   6217 	 * Set the receive filter.
   6218 	 *
   6219 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6220 	 * the setting of RCTL.EN in wm_set_filter().
   6221 	 */
   6222 	wm_set_filter(sc);
   6223 
   6224 	/* On 82575 and later, set RDT only if RX is enabled */
   6225 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6226 		int qidx;
   6227 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6228 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6229 			for (i = 0; i < WM_NRXDESC; i++) {
   6230 				mutex_enter(rxq->rxq_lock);
   6231 				wm_init_rxdesc(rxq, i);
   6232 				mutex_exit(rxq->rxq_lock);
   6234 			}
   6235 		}
   6236 	}
   6237 
   6238 	wm_unset_stopping_flags(sc);
   6239 
   6240 	/* Start the one second link check clock. */
   6241 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6242 
   6243 	/* ...all done! */
   6244 	ifp->if_flags |= IFF_RUNNING;
   6245 	ifp->if_flags &= ~IFF_OACTIVE;
   6246 
   6247  out:
   6248 	/* Save last flags for the callback */
   6249 	sc->sc_if_flags = ifp->if_flags;
   6250 	sc->sc_ec_capenable = ec->ec_capenable;
   6251 	if (error)
   6252 		log(LOG_ERR, "%s: interface not running\n",
   6253 		    device_xname(sc->sc_dev));
   6254 	return error;
   6255 }
   6256 
   6257 /*
   6258  * wm_stop:		[ifnet interface function]
   6259  *
   6260  *	Stop transmission on the interface.
   6261  */
   6262 static void
   6263 wm_stop(struct ifnet *ifp, int disable)
   6264 {
   6265 	struct wm_softc *sc = ifp->if_softc;
   6266 
   6267 	WM_CORE_LOCK(sc);
   6268 	wm_stop_locked(ifp, disable);
   6269 	WM_CORE_UNLOCK(sc);
   6270 }
   6271 
   6272 static void
   6273 wm_stop_locked(struct ifnet *ifp, int disable)
   6274 {
   6275 	struct wm_softc *sc = ifp->if_softc;
   6276 	struct wm_txsoft *txs;
   6277 	int i, qidx;
   6278 
   6279 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6280 		device_xname(sc->sc_dev), __func__));
   6281 	KASSERT(WM_CORE_LOCKED(sc));
   6282 
   6283 	wm_set_stopping_flags(sc);
   6284 
   6285 	/* Stop the one second clock. */
   6286 	callout_stop(&sc->sc_tick_ch);
   6287 
   6288 	/* Stop the 82547 Tx FIFO stall check timer. */
   6289 	if (sc->sc_type == WM_T_82547)
   6290 		callout_stop(&sc->sc_txfifo_ch);
   6291 
   6292 	if (sc->sc_flags & WM_F_HAS_MII) {
   6293 		/* Down the MII. */
   6294 		mii_down(&sc->sc_mii);
   6295 	} else {
   6296 #if 0
   6297 		/* Should we clear PHY's status properly? */
   6298 		wm_reset(sc);
   6299 #endif
   6300 	}
   6301 
   6302 	/* Stop the transmit and receive processes. */
   6303 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6304 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6305 	sc->sc_rctl &= ~RCTL_EN;
   6306 
   6307 	/*
   6308 	 * Clear the interrupt mask to ensure the device cannot assert its
   6309 	 * interrupt line.
   6310 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6311 	 * service any currently pending or shared interrupt.
   6312 	 */
   6313 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6314 	sc->sc_icr = 0;
   6315 	if (wm_is_using_msix(sc)) {
   6316 		if (sc->sc_type != WM_T_82574) {
   6317 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6318 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6319 		} else
   6320 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6321 	}
   6322 
   6323 	/* Release any queued transmit buffers. */
   6324 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6325 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6326 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6327 		mutex_enter(txq->txq_lock);
   6328 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6329 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6330 			txs = &txq->txq_soft[i];
   6331 			if (txs->txs_mbuf != NULL) {
   6332 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6333 				m_freem(txs->txs_mbuf);
   6334 				txs->txs_mbuf = NULL;
   6335 			}
   6336 		}
   6337 		mutex_exit(txq->txq_lock);
   6338 	}
   6339 
   6340 	/* Mark the interface as down and cancel the watchdog timer. */
   6341 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6342 
   6343 	if (disable) {
   6344 		for (i = 0; i < sc->sc_nqueues; i++) {
   6345 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6346 			mutex_enter(rxq->rxq_lock);
   6347 			wm_rxdrain(rxq);
   6348 			mutex_exit(rxq->rxq_lock);
   6349 		}
   6350 	}
   6351 
   6352 #if 0 /* notyet */
   6353 	if (sc->sc_type >= WM_T_82544)
   6354 		CSR_WRITE(sc, WMREG_WUC, 0);
   6355 #endif
   6356 }
   6357 
   6358 static void
   6359 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6360 {
   6361 	struct mbuf *m;
   6362 	int i;
   6363 
   6364 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6365 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6366 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6367 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6368 		    m->m_data, m->m_len, m->m_flags);
   6369 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6370 	    i, i == 1 ? "" : "s");
   6371 }
   6372 
   6373 /*
   6374  * wm_82547_txfifo_stall:
   6375  *
   6376  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6377  *	reset the FIFO pointers, and restart packet transmission.
   6378  */
   6379 static void
   6380 wm_82547_txfifo_stall(void *arg)
   6381 {
   6382 	struct wm_softc *sc = arg;
   6383 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6384 
   6385 	mutex_enter(txq->txq_lock);
   6386 
   6387 	if (txq->txq_stopping)
   6388 		goto out;
   6389 
   6390 	if (txq->txq_fifo_stall) {
   6391 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6392 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6393 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6394 			/*
   6395 			 * Packets have drained.  Stop transmitter, reset
   6396 			 * FIFO pointers, restart transmitter, and kick
   6397 			 * the packet queue.
   6398 			 */
   6399 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6400 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6401 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6402 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6403 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6404 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6405 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6406 			CSR_WRITE_FLUSH(sc);
   6407 
   6408 			txq->txq_fifo_head = 0;
   6409 			txq->txq_fifo_stall = 0;
   6410 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6411 		} else {
   6412 			/*
   6413 			 * Still waiting for packets to drain; try again in
   6414 			 * another tick.
   6415 			 */
   6416 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6417 		}
   6418 	}
   6419 
   6420 out:
   6421 	mutex_exit(txq->txq_lock);
   6422 }
   6423 
   6424 /*
   6425  * wm_82547_txfifo_bugchk:
   6426  *
   6427  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6428  *	prevent enqueueing a packet that would wrap around the end
   6429  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6430  *
   6431  *	We do this by checking the amount of space before the end
   6432  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6433  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6434  *	the internal FIFO pointers to the beginning, and restart
   6435  *	transmission on the interface.
   6436  */
   6437 #define	WM_FIFO_HDR		0x10
   6438 #define	WM_82547_PAD_LEN	0x3e0
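
        /*
         * A worked example of the check below (assumed numbers, not taken
         * from a datasheet): suppose txq_fifo_size is 0x3000 and
         * txq_fifo_head is 0x2f00, so space = 0x100 bytes remain before
         * the wrap point.  A 1500-byte packet rounds up to len = 0x5f0,
         * and since 0x5f0 >= WM_82547_PAD_LEN + 0x100 (= 0x4e0), the
         * packet could wrap and we must stall until the FIFO drains.
         */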
   6439 static int
   6440 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6441 {
   6442 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6443 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6444 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6445 
   6446 	/* Just return if already stalled. */
   6447 	if (txq->txq_fifo_stall)
   6448 		return 1;
   6449 
   6450 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6451 		/* Stall only occurs in half-duplex mode. */
   6452 		goto send_packet;
   6453 	}
   6454 
   6455 	if (len >= WM_82547_PAD_LEN + space) {
   6456 		txq->txq_fifo_stall = 1;
   6457 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6458 		return 1;
   6459 	}
   6460 
   6461  send_packet:
   6462 	txq->txq_fifo_head += len;
   6463 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6464 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6465 
   6466 	return 0;
   6467 }
   6468 
   6469 static int
   6470 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6471 {
   6472 	int error;
   6473 
   6474 	/*
   6475 	 * Allocate the control data structures, and create and load the
   6476 	 * DMA map for it.
   6477 	 *
   6478 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6479 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6480 	 * both sets within the same 4G segment.
   6481 	 */
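        	/*
        	 * The 4G constraint is enforced below through the boundary
        	 * argument of bus_dmamem_alloc(9): a boundary of 0x100000000
        	 * means no returned segment may cross a 4GB line (standard
        	 * bus_dma(9) semantics).
        	 */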
   6482 	if (sc->sc_type < WM_T_82544)
   6483 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6484 	else
   6485 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6486 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6487 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6488 	else
   6489 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6490 
   6491 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6492 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6493 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6494 		aprint_error_dev(sc->sc_dev,
   6495 		    "unable to allocate TX control data, error = %d\n",
   6496 		    error);
   6497 		goto fail_0;
   6498 	}
   6499 
   6500 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6501 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6502 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6503 		aprint_error_dev(sc->sc_dev,
   6504 		    "unable to map TX control data, error = %d\n", error);
   6505 		goto fail_1;
   6506 	}
   6507 
   6508 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6509 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6510 		aprint_error_dev(sc->sc_dev,
   6511 		    "unable to create TX control data DMA map, error = %d\n",
   6512 		    error);
   6513 		goto fail_2;
   6514 	}
   6515 
   6516 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6517 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6518 		aprint_error_dev(sc->sc_dev,
   6519 		    "unable to load TX control data DMA map, error = %d\n",
   6520 		    error);
   6521 		goto fail_3;
   6522 	}
   6523 
   6524 	return 0;
   6525 
   6526  fail_3:
   6527 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6528  fail_2:
   6529 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6530 	    WM_TXDESCS_SIZE(txq));
   6531  fail_1:
   6532 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6533  fail_0:
   6534 	return error;
   6535 }
   6536 
   6537 static void
   6538 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6539 {
   6540 
   6541 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6542 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6543 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6544 	    WM_TXDESCS_SIZE(txq));
   6545 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6546 }
   6547 
   6548 static int
   6549 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6550 {
   6551 	int error;
   6552 	size_t rxq_descs_size;
   6553 
   6554 	/*
   6555 	 * Allocate the control data structures, and create and load the
   6556 	 * DMA map for it.
   6557 	 *
   6558 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6559 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6560 	 * both sets within the same 4G segment.
   6561 	 */
   6562 	rxq->rxq_ndesc = WM_NRXDESC;
   6563 	if (sc->sc_type == WM_T_82574)
   6564 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6565 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6566 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6567 	else
   6568 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6569 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6570 
   6571 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6572 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6573 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6574 		aprint_error_dev(sc->sc_dev,
   6575 		    "unable to allocate RX control data, error = %d\n",
   6576 		    error);
   6577 		goto fail_0;
   6578 	}
   6579 
   6580 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6581 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6582 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6583 		aprint_error_dev(sc->sc_dev,
   6584 		    "unable to map RX control data, error = %d\n", error);
   6585 		goto fail_1;
   6586 	}
   6587 
   6588 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6589 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6590 		aprint_error_dev(sc->sc_dev,
   6591 		    "unable to create RX control data DMA map, error = %d\n",
   6592 		    error);
   6593 		goto fail_2;
   6594 	}
   6595 
   6596 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6597 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6598 		aprint_error_dev(sc->sc_dev,
   6599 		    "unable to load RX control data DMA map, error = %d\n",
   6600 		    error);
   6601 		goto fail_3;
   6602 	}
   6603 
   6604 	return 0;
   6605 
   6606  fail_3:
   6607 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6608  fail_2:
   6609 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6610 	    rxq_descs_size);
   6611  fail_1:
   6612 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6613  fail_0:
   6614 	return error;
   6615 }
   6616 
   6617 static void
   6618 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6619 {
   6620 
   6621 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6622 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6623 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6624 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6625 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6626 }
   6627 
   6628 
   6629 static int
   6630 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6631 {
   6632 	int i, error;
   6633 
   6634 	/* Create the transmit buffer DMA maps. */
   6635 	WM_TXQUEUELEN(txq) =
   6636 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6637 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6638 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6639 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6640 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6641 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6642 			aprint_error_dev(sc->sc_dev,
   6643 			    "unable to create Tx DMA map %d, error = %d\n",
   6644 			    i, error);
   6645 			goto fail;
   6646 		}
   6647 	}
   6648 
   6649 	return 0;
   6650 
   6651  fail:
   6652 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6653 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6654 			bus_dmamap_destroy(sc->sc_dmat,
   6655 			    txq->txq_soft[i].txs_dmamap);
   6656 	}
   6657 	return error;
   6658 }
   6659 
   6660 static void
   6661 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6662 {
   6663 	int i;
   6664 
   6665 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6666 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6667 			bus_dmamap_destroy(sc->sc_dmat,
   6668 			    txq->txq_soft[i].txs_dmamap);
   6669 	}
   6670 }
   6671 
   6672 static int
   6673 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6674 {
   6675 	int i, error;
   6676 
   6677 	/* Create the receive buffer DMA maps. */
   6678 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6679 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6680 			    MCLBYTES, 0, 0,
   6681 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6682 			aprint_error_dev(sc->sc_dev,
   6683 			    "unable to create Rx DMA map %d, error = %d\n",
   6684 			    i, error);
   6685 			goto fail;
   6686 		}
   6687 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6688 	}
   6689 
   6690 	return 0;
   6691 
   6692  fail:
   6693 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6694 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6695 			bus_dmamap_destroy(sc->sc_dmat,
   6696 			    rxq->rxq_soft[i].rxs_dmamap);
   6697 	}
   6698 	return error;
   6699 }
   6700 
   6701 static void
   6702 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6703 {
   6704 	int i;
   6705 
   6706 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6707 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6708 			bus_dmamap_destroy(sc->sc_dmat,
   6709 			    rxq->rxq_soft[i].rxs_dmamap);
   6710 	}
   6711 }
   6712 
   6713 /*
   6714  * wm_alloc_txrx_queues:
   6715  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6716  */
   6717 static int
   6718 wm_alloc_txrx_queues(struct wm_softc *sc)
   6719 {
   6720 	int i, error, tx_done, rx_done;
   6721 
   6722 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6723 	    KM_SLEEP);
   6724 	if (sc->sc_queue == NULL) {
   6725 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6726 		error = ENOMEM;
   6727 		goto fail_0;
   6728 	}
   6729 
   6730 	/* For transmission */
   6731 	error = 0;
   6732 	tx_done = 0;
   6733 	for (i = 0; i < sc->sc_nqueues; i++) {
   6734 #ifdef WM_EVENT_COUNTERS
   6735 		int j;
   6736 		const char *xname;
   6737 #endif
   6738 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6739 		txq->txq_sc = sc;
   6740 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6741 
   6742 		error = wm_alloc_tx_descs(sc, txq);
   6743 		if (error)
   6744 			break;
   6745 		error = wm_alloc_tx_buffer(sc, txq);
   6746 		if (error) {
   6747 			wm_free_tx_descs(sc, txq);
   6748 			break;
   6749 		}
   6750 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6751 		if (txq->txq_interq == NULL) {
   6752 			wm_free_tx_descs(sc, txq);
   6753 			wm_free_tx_buffer(sc, txq);
   6754 			error = ENOMEM;
   6755 			break;
   6756 		}
   6757 
   6758 #ifdef WM_EVENT_COUNTERS
   6759 		xname = device_xname(sc->sc_dev);
   6760 
   6761 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6762 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6763 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6764 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6765 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6766 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6767 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6768 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6769 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6770 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6771 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6772 
   6773 		for (j = 0; j < WM_NTXSEGS; j++) {
   6774 			snprintf(txq->txq_txseg_evcnt_names[j],
   6775 			    sizeof(txq->txq_txseg_evcnt_names[j]),
        			    "txq%02dtxseg%d", i, j);
   6776 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   6777 			    EVCNT_TYPE_MISC, NULL, xname,
        			    txq->txq_txseg_evcnt_names[j]);
   6778 		}
   6779 
   6780 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6781 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6782 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6783 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6784 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6785 #endif /* WM_EVENT_COUNTERS */
   6786 
   6787 		tx_done++;
   6788 	}
   6789 	if (error)
   6790 		goto fail_1;
   6791 
   6792 	/* For receive */
   6793 	error = 0;
   6794 	rx_done = 0;
   6795 	for (i = 0; i < sc->sc_nqueues; i++) {
   6796 #ifdef WM_EVENT_COUNTERS
   6797 		const char *xname;
   6798 #endif
   6799 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6800 		rxq->rxq_sc = sc;
   6801 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6802 
   6803 		error = wm_alloc_rx_descs(sc, rxq);
   6804 		if (error)
   6805 			break;
   6806 
   6807 		error = wm_alloc_rx_buffer(sc, rxq);
   6808 		if (error) {
   6809 			wm_free_rx_descs(sc, rxq);
   6810 			break;
   6811 		}
   6812 
   6813 #ifdef WM_EVENT_COUNTERS
   6814 		xname = device_xname(sc->sc_dev);
   6815 
   6816 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6817 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6818 
   6819 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6820 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6821 #endif /* WM_EVENT_COUNTERS */
   6822 
   6823 		rx_done++;
   6824 	}
   6825 	if (error)
   6826 		goto fail_2;
   6827 
   6828 	return 0;
   6829 
   6830  fail_2:
   6831 	for (i = 0; i < rx_done; i++) {
   6832 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6833 		wm_free_rx_buffer(sc, rxq);
   6834 		wm_free_rx_descs(sc, rxq);
   6835 		if (rxq->rxq_lock)
   6836 			mutex_obj_free(rxq->rxq_lock);
   6837 	}
   6838  fail_1:
   6839 	for (i = 0; i < tx_done; i++) {
   6840 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6841 		pcq_destroy(txq->txq_interq);
   6842 		wm_free_tx_buffer(sc, txq);
   6843 		wm_free_tx_descs(sc, txq);
   6844 		if (txq->txq_lock)
   6845 			mutex_obj_free(txq->txq_lock);
   6846 	}
   6847 
   6848 	kmem_free(sc->sc_queue,
   6849 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6850  fail_0:
   6851 	return error;
   6852 }
   6853 
   6854 /*
   6855  * wm_free_txrx_queues:
   6856  *	Free {tx,rx}descs and {tx,rx} buffers
   6857  */
   6858 static void
   6859 wm_free_txrx_queues(struct wm_softc *sc)
   6860 {
   6861 	int i;
   6862 
   6863 	for (i = 0; i < sc->sc_nqueues; i++) {
   6864 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6865 
   6866 #ifdef WM_EVENT_COUNTERS
   6867 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6868 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6869 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6870 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6871 #endif /* WM_EVENT_COUNTERS */
   6872 
   6873 		wm_free_rx_buffer(sc, rxq);
   6874 		wm_free_rx_descs(sc, rxq);
   6875 		if (rxq->rxq_lock)
   6876 			mutex_obj_free(rxq->rxq_lock);
   6877 	}
   6878 
   6879 	for (i = 0; i < sc->sc_nqueues; i++) {
   6880 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6881 		struct mbuf *m;
   6882 #ifdef WM_EVENT_COUNTERS
   6883 		int j;
   6884 
   6885 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6886 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6887 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6888 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6889 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6890 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6891 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6892 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6893 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6894 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6895 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6896 
   6897 		for (j = 0; j < WM_NTXSEGS; j++)
   6898 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6899 
   6900 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6901 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6902 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6903 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6904 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6905 #endif /* WM_EVENT_COUNTERS */
   6906 
   6907 		/* Drain txq_interq */
   6908 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6909 			m_freem(m);
   6910 		pcq_destroy(txq->txq_interq);
   6911 
   6912 		wm_free_tx_buffer(sc, txq);
   6913 		wm_free_tx_descs(sc, txq);
   6914 		if (txq->txq_lock)
   6915 			mutex_obj_free(txq->txq_lock);
   6916 	}
   6917 
   6918 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6919 }
   6920 
   6921 static void
   6922 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6923 {
   6924 
   6925 	KASSERT(mutex_owned(txq->txq_lock));
   6926 
   6927 	/* Initialize the transmit descriptor ring. */
   6928 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6929 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6930 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6931 	txq->txq_free = WM_NTXDESC(txq);
   6932 	txq->txq_next = 0;
   6933 }
   6934 
   6935 static void
   6936 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6937     struct wm_txqueue *txq)
   6938 {
   6939 
   6940 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6941 		device_xname(sc->sc_dev), __func__));
   6942 	KASSERT(mutex_owned(txq->txq_lock));
   6943 
   6944 	if (sc->sc_type < WM_T_82543) {
   6945 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6946 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6947 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6948 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6949 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6950 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6951 	} else {
   6952 		int qid = wmq->wmq_id;
   6953 
   6954 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6955 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6956 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6957 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6958 
   6959 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6960 			/*
   6961 			 * Don't write TDT before TCTL.EN is set.
   6962 			 * See the documentation.
   6963 			 */
   6964 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6965 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6966 			    | TXDCTL_WTHRESH(0));
   6967 		else {
   6968 			/* XXX should update with AIM? */
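        			/*
        			 * The division by four below is a unit
        			 * conversion (assumed from other e1000-family
        			 * drivers): wmq_itr is kept in ITR/EITR units
        			 * of 256 ns, while TIDV and TADV count
        			 * 1.024 us ticks, four times as long.
        			 */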
   6969 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6970 			if (sc->sc_type >= WM_T_82540) {
   6971 				/* Should be the same */
   6972 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6973 			}
   6974 
   6975 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6976 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6977 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6978 		}
   6979 	}
   6980 }
   6981 
   6982 static void
   6983 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6984 {
   6985 	int i;
   6986 
   6987 	KASSERT(mutex_owned(txq->txq_lock));
   6988 
   6989 	/* Initialize the transmit job descriptors. */
   6990 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6991 		txq->txq_soft[i].txs_mbuf = NULL;
   6992 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6993 	txq->txq_snext = 0;
   6994 	txq->txq_sdirty = 0;
   6995 }
   6996 
   6997 static void
   6998 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6999     struct wm_txqueue *txq)
   7000 {
   7001 
   7002 	KASSERT(mutex_owned(txq->txq_lock));
   7003 
   7004 	/*
   7005 	 * Set up some register offsets that are different between
   7006 	 * the i82542 and the i82543 and later chips.
   7007 	 */
   7008 	if (sc->sc_type < WM_T_82543)
   7009 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7010 	else
   7011 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7012 
   7013 	wm_init_tx_descs(sc, txq);
   7014 	wm_init_tx_regs(sc, wmq, txq);
   7015 	wm_init_tx_buffer(sc, txq);
   7016 
   7017 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7018 	txq->txq_sending = false;
   7019 }
   7020 
   7021 static void
   7022 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7023     struct wm_rxqueue *rxq)
   7024 {
   7025 
   7026 	KASSERT(mutex_owned(rxq->rxq_lock));
   7027 
   7028 	/*
   7029 	 * Initialize the receive descriptor and receive job
   7030 	 * descriptor rings.
   7031 	 */
   7032 	if (sc->sc_type < WM_T_82543) {
   7033 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7034 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7035 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7036 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7037 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7038 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7039 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7040 
   7041 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7042 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7043 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7044 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7045 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7046 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7047 	} else {
   7048 		int qid = wmq->wmq_id;
   7049 
   7050 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7051 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7052 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7053 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7054 
   7055 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7056 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7057 				panic("%s: MCLBYTES %d unsupported for 82575 "
        				    "or higher\n", __func__, MCLBYTES);
   7058 
   7059 			/* Currently, we support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   7060 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   7061 			    SRRCTL_DESCTYPE_ADV_ONEBUF
        			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
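        			/*
        			 * BSIZEPKT is in 1KB units, so with standard
        			 * 2KB clusters the write above sets a 2KB
        			 * packet buffer (assuming SRRCTL_BSIZEPKT_SHIFT
        			 * is 10, i.e. 1KB granularity).
        			 */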
   7062 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7063 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7064 			    | RXDCTL_WTHRESH(1));
   7065 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7066 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7067 		} else {
   7068 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7069 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7070 			/* XXX should update with AIM? */
   7071 			CSR_WRITE(sc, WMREG_RDTR,
   7072 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7073 			/* MUST be the same value as RDTR */
   7074 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7075 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7076 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7077 		}
   7078 	}
   7079 }
   7080 
   7081 static int
   7082 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7083 {
   7084 	struct wm_rxsoft *rxs;
   7085 	int error, i;
   7086 
   7087 	KASSERT(mutex_owned(rxq->rxq_lock));
   7088 
   7089 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7090 		rxs = &rxq->rxq_soft[i];
   7091 		if (rxs->rxs_mbuf == NULL) {
   7092 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7093 				log(LOG_ERR, "%s: unable to allocate or map "
   7094 				    "rx buffer %d, error = %d\n",
   7095 				    device_xname(sc->sc_dev), i, error);
   7096 				/*
   7097 				 * XXX Should attempt to run with fewer receive
   7098 				 * XXX buffers instead of just failing.
   7099 				 */
   7100 				wm_rxdrain(rxq);
   7101 				return ENOMEM;
   7102 			}
   7103 		} else {
   7104 			/*
   7105 			 * For 82575 and 82576, the RX descriptors must be
   7106 			 * initialized after the setting of RCTL.EN in
   7107 			 * wm_set_filter()
   7108 			 */
   7109 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7110 				wm_init_rxdesc(rxq, i);
   7111 		}
   7112 	}
   7113 	rxq->rxq_ptr = 0;
   7114 	rxq->rxq_discard = 0;
   7115 	WM_RXCHAIN_RESET(rxq);
   7116 
   7117 	return 0;
   7118 }
   7119 
   7120 static int
   7121 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7122     struct wm_rxqueue *rxq)
   7123 {
   7124 
   7125 	KASSERT(mutex_owned(rxq->rxq_lock));
   7126 
   7127 	/*
   7128 	 * Set up some register offsets that are different between
   7129 	 * the i82542 and the i82543 and later chips.
   7130 	 */
   7131 	if (sc->sc_type < WM_T_82543)
   7132 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7133 	else
   7134 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7135 
   7136 	wm_init_rx_regs(sc, wmq, rxq);
   7137 	return wm_init_rx_buffer(sc, rxq);
   7138 }
   7139 
   7140 /*
   7141  * wm_init_txrx_queues:
   7142  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7143  */
   7144 static int
   7145 wm_init_txrx_queues(struct wm_softc *sc)
   7146 {
   7147 	int i, error = 0;
   7148 
   7149 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7150 		device_xname(sc->sc_dev), __func__));
   7151 
   7152 	for (i = 0; i < sc->sc_nqueues; i++) {
   7153 		struct wm_queue *wmq = &sc->sc_queue[i];
   7154 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7155 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7156 
   7157 		/*
   7158 		 * Currently, we use a constant value instead of AIM
   7159 		 * (Adaptive Interrupt Moderation). Furthermore, the interrupt
   7160 		 * interval of multiqueue setups, which use polling mode, is
   7161 		 * lower than the default. More tuning and AIM are required.
   7162 		 * More tuning and AIM are required.
   7163 		 */
   7164 		if (wm_is_using_multiqueue(sc))
   7165 			wmq->wmq_itr = 50;
   7166 		else
   7167 			wmq->wmq_itr = sc->sc_itr_init;
   7168 		wmq->wmq_set_itr = true;
   7169 
   7170 		mutex_enter(txq->txq_lock);
   7171 		wm_init_tx_queue(sc, wmq, txq);
   7172 		mutex_exit(txq->txq_lock);
   7173 
   7174 		mutex_enter(rxq->rxq_lock);
   7175 		error = wm_init_rx_queue(sc, wmq, rxq);
   7176 		mutex_exit(rxq->rxq_lock);
   7177 		if (error)
   7178 			break;
   7179 	}
   7180 
   7181 	return error;
   7182 }
   7183 
   7184 /*
   7185  * wm_tx_offload:
   7186  *
   7187  *	Set up TCP/IP checksumming parameters for the
   7188  *	specified packet.
   7189  */
   7190 static int
   7191 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7192     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7193 {
   7194 	struct mbuf *m0 = txs->txs_mbuf;
   7195 	struct livengood_tcpip_ctxdesc *t;
   7196 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7197 	uint32_t ipcse;
   7198 	struct ether_header *eh;
   7199 	int offset, iphl;
   7200 	uint8_t fields;
   7201 
   7202 	/*
   7203 	 * XXX It would be nice if the mbuf pkthdr had offset
   7204 	 * fields for the protocol headers.
   7205 	 */
   7206 
   7207 	eh = mtod(m0, struct ether_header *);
   7208 	switch (htons(eh->ether_type)) {
   7209 	case ETHERTYPE_IP:
   7210 	case ETHERTYPE_IPV6:
   7211 		offset = ETHER_HDR_LEN;
   7212 		break;
   7213 
   7214 	case ETHERTYPE_VLAN:
   7215 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7216 		break;
   7217 
   7218 	default:
   7219 		/* Don't support this protocol or encapsulation. */
   7220 		*fieldsp = 0;
   7221 		*cmdp = 0;
   7222 		return 0;
   7223 	}
   7224 
   7225 	if ((m0->m_pkthdr.csum_flags &
   7226 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7227 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7228 	} else
   7229 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7230 
   7231 	ipcse = offset + iphl - 1;
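        	/*
        	 * ipcse is an inclusive offset: for an untagged IPv4 packet
        	 * with a 20-byte header it is ETHER_HDR_LEN + 20 - 1 = 33,
        	 * the offset of the last byte of the IP header.
        	 */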
   7232 
   7233 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7234 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7235 	seg = 0;
   7236 	fields = 0;
   7237 
   7238 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7239 		int hlen = offset + iphl;
   7240 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7241 
   7242 		if (__predict_false(m0->m_len <
   7243 				    (hlen + sizeof(struct tcphdr)))) {
   7244 			/*
   7245 			 * TCP/IP headers are not in the first mbuf; we need
   7246 			 * to do this the slow and painful way. Let's just
   7247 			 * hope this doesn't happen very often.
   7248 			 */
   7249 			struct tcphdr th;
   7250 
   7251 			WM_Q_EVCNT_INCR(txq, tsopain);
   7252 
   7253 			m_copydata(m0, hlen, sizeof(th), &th);
   7254 			if (v4) {
   7255 				struct ip ip;
   7256 
   7257 				m_copydata(m0, offset, sizeof(ip), &ip);
   7258 				ip.ip_len = 0;
   7259 				m_copyback(m0,
   7260 				    offset + offsetof(struct ip, ip_len),
   7261 				    sizeof(ip.ip_len), &ip.ip_len);
   7262 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7263 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7264 			} else {
   7265 				struct ip6_hdr ip6;
   7266 
   7267 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7268 				ip6.ip6_plen = 0;
   7269 				m_copyback(m0,
   7270 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7271 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7272 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7273 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7274 			}
   7275 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7276 			    sizeof(th.th_sum), &th.th_sum);
   7277 
   7278 			hlen += th.th_off << 2;
   7279 		} else {
   7280 			/*
   7281 			 * TCP/IP headers are in the first mbuf; we can do
   7282 			 * this the easy way.
   7283 			 */
   7284 			struct tcphdr *th;
   7285 
   7286 			if (v4) {
   7287 				struct ip *ip =
   7288 				    (void *)(mtod(m0, char *) + offset);
   7289 				th = (void *)(mtod(m0, char *) + hlen);
   7290 
   7291 				ip->ip_len = 0;
   7292 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7293 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7294 			} else {
   7295 				struct ip6_hdr *ip6 =
   7296 				    (void *)(mtod(m0, char *) + offset);
   7297 				th = (void *)(mtod(m0, char *) + hlen);
   7298 
   7299 				ip6->ip6_plen = 0;
   7300 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7301 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7302 			}
   7303 			hlen += th->th_off << 2;
   7304 		}
   7305 
   7306 		if (v4) {
   7307 			WM_Q_EVCNT_INCR(txq, tso);
   7308 			cmdlen |= WTX_TCPIP_CMD_IP;
   7309 		} else {
   7310 			WM_Q_EVCNT_INCR(txq, tso6);
   7311 			ipcse = 0;
   7312 		}
   7313 		cmd |= WTX_TCPIP_CMD_TSE;
   7314 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7315 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7316 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7317 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7318 	}
   7319 
   7320 	/*
   7321 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7322 	 * offload feature, if we load the context descriptor, we
   7323 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7324 	 */
   7325 
   7326 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7327 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7328 	    WTX_TCPIP_IPCSE(ipcse);
   7329 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7330 		WM_Q_EVCNT_INCR(txq, ipsum);
   7331 		fields |= WTX_IXSM;
   7332 	}
   7333 
   7334 	offset += iphl;
   7335 
   7336 	if (m0->m_pkthdr.csum_flags &
   7337 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7338 		WM_Q_EVCNT_INCR(txq, tusum);
   7339 		fields |= WTX_TXSM;
   7340 		tucs = WTX_TCPIP_TUCSS(offset) |
   7341 		    WTX_TCPIP_TUCSO(offset +
   7342 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7343 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7344 	} else if ((m0->m_pkthdr.csum_flags &
   7345 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7346 		WM_Q_EVCNT_INCR(txq, tusum6);
   7347 		fields |= WTX_TXSM;
   7348 		tucs = WTX_TCPIP_TUCSS(offset) |
   7349 		    WTX_TCPIP_TUCSO(offset +
   7350 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7351 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7352 	} else {
   7353 		/* Just initialize it to a valid TCP context. */
   7354 		tucs = WTX_TCPIP_TUCSS(offset) |
   7355 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7356 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7357 	}
   7358 
   7359 	/*
   7360 	 * We don't have to write a context descriptor for every packet,
   7361 	 * except on the 82574: when the 82574 uses two descriptor queues,
   7362 	 * we must write a context descriptor for every packet. Doing so
   7363 	 * for every packet adds overhead, but it does not cause
   7364 	 * problems.
   7365 	 */
   7366 	/* Fill in the context descriptor. */
   7367 	t = (struct livengood_tcpip_ctxdesc *)
   7368 	    &txq->txq_descs[txq->txq_next];
   7369 	t->tcpip_ipcs = htole32(ipcs);
   7370 	t->tcpip_tucs = htole32(tucs);
   7371 	t->tcpip_cmdlen = htole32(cmdlen);
   7372 	t->tcpip_seg = htole32(seg);
   7373 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7374 
   7375 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7376 	txs->txs_ndesc++;
   7377 
   7378 	*cmdp = cmd;
   7379 	*fieldsp = fields;
   7380 
   7381 	return 0;
   7382 }
   7383 
   7384 static inline int
   7385 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7386 {
   7387 	struct wm_softc *sc = ifp->if_softc;
   7388 	u_int cpuid = cpu_index(curcpu());
   7389 
   7390 	/*
   7391 	 * Currently, a simple distribution strategy.
   7392 	 * TODO:
   7393 	 * distribute by flow ID (RSS hash value).
   7394 	 */
   7395 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7396 }
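
        /*
         * A worked example of the mapping above (hypothetical numbers):
         * with ncpu = 4, sc_affinity_offset = 1 and sc_nqueues = 2, CPUs
         * 0..3 compute ((cpuid + 3) % 4) % 2, i.e. queues 1, 0, 1, 0, so
         * consecutive CPU indices alternate between the two queues.
         */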
   7397 
   7398 /*
   7399  * wm_start:		[ifnet interface function]
   7400  *
   7401  *	Start packet transmission on the interface.
   7402  */
   7403 static void
   7404 wm_start(struct ifnet *ifp)
   7405 {
   7406 	struct wm_softc *sc = ifp->if_softc;
   7407 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7408 
   7409 #ifdef WM_MPSAFE
   7410 	KASSERT(if_is_mpsafe(ifp));
   7411 #endif
   7412 	/*
   7413 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7414 	 */
   7415 
   7416 	mutex_enter(txq->txq_lock);
   7417 	if (!txq->txq_stopping)
   7418 		wm_start_locked(ifp);
   7419 	mutex_exit(txq->txq_lock);
   7420 }
   7421 
   7422 static void
   7423 wm_start_locked(struct ifnet *ifp)
   7424 {
   7425 	struct wm_softc *sc = ifp->if_softc;
   7426 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7427 
   7428 	wm_send_common_locked(ifp, txq, false);
   7429 }
   7430 
   7431 static int
   7432 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7433 {
   7434 	int qid;
   7435 	struct wm_softc *sc = ifp->if_softc;
   7436 	struct wm_txqueue *txq;
   7437 
   7438 	qid = wm_select_txqueue(ifp, m);
   7439 	txq = &sc->sc_queue[qid].wmq_txq;
   7440 
   7441 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7442 		m_freem(m);
   7443 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7444 		return ENOBUFS;
   7445 	}
   7446 
   7447 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   7448 	ifp->if_obytes += m->m_pkthdr.len;
   7449 	if (m->m_flags & M_MCAST)
   7450 		ifp->if_omcasts++;
   7451 
   7452 	if (mutex_tryenter(txq->txq_lock)) {
   7453 		if (!txq->txq_stopping)
   7454 			wm_transmit_locked(ifp, txq);
   7455 		mutex_exit(txq->txq_lock);
   7456 	}
   7457 
   7458 	return 0;
   7459 }
   7460 
   7461 static void
   7462 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7463 {
   7464 
   7465 	wm_send_common_locked(ifp, txq, true);
   7466 }
   7467 
   7468 static void
   7469 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7470     bool is_transmit)
   7471 {
   7472 	struct wm_softc *sc = ifp->if_softc;
   7473 	struct mbuf *m0;
   7474 	struct wm_txsoft *txs;
   7475 	bus_dmamap_t dmamap;
   7476 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7477 	bus_addr_t curaddr;
   7478 	bus_size_t seglen, curlen;
   7479 	uint32_t cksumcmd;
   7480 	uint8_t cksumfields;
   7481 	bool remap = true;
   7482 
   7483 	KASSERT(mutex_owned(txq->txq_lock));
   7484 
   7485 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7486 		return;
   7487 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7488 		return;
   7489 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7490 		return;
   7491 
   7492 	/* Remember the previous number of free descriptors. */
   7493 	ofree = txq->txq_free;
   7494 
   7495 	/*
   7496 	 * Loop through the send queue, setting up transmit descriptors
   7497 	 * until we drain the queue, or use up all available transmit
   7498 	 * descriptors.
   7499 	 */
   7500 	for (;;) {
   7501 		m0 = NULL;
   7502 
   7503 		/* Get a work queue entry. */
   7504 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7505 			wm_txeof(txq, UINT_MAX);
   7506 			if (txq->txq_sfree == 0) {
   7507 				DPRINTF(WM_DEBUG_TX,
   7508 				    ("%s: TX: no free job descriptors\n",
   7509 					device_xname(sc->sc_dev)));
   7510 				WM_Q_EVCNT_INCR(txq, txsstall);
   7511 				break;
   7512 			}
   7513 		}
   7514 
   7515 		/* Grab a packet off the queue. */
   7516 		if (is_transmit)
   7517 			m0 = pcq_get(txq->txq_interq);
   7518 		else
   7519 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7520 		if (m0 == NULL)
   7521 			break;
   7522 
   7523 		DPRINTF(WM_DEBUG_TX,
   7524 		    ("%s: TX: have packet to transmit: %p\n",
   7525 			device_xname(sc->sc_dev), m0));
   7526 
   7527 		txs = &txq->txq_soft[txq->txq_snext];
   7528 		dmamap = txs->txs_dmamap;
   7529 
   7530 		use_tso = (m0->m_pkthdr.csum_flags &
   7531 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7532 
   7533 		/*
   7534 		 * So says the Linux driver:
   7535 		 * The controller does a simple calculation to make sure
   7536 		 * there is enough room in the FIFO before initiating the
   7537 		 * DMA for each buffer. The calc is:
   7538 		 *	4 = ceil(buffer len / MSS)
   7539 		 * To make sure we don't overrun the FIFO, adjust the max
   7540 		 * buffer len if the MSS drops.
   7541 		 */
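        		/*
        		 * E.g. (an assumed, typical MSS): with segsz = 1460
        		 * the clamp below limits each DMA segment to
        		 * 4 * 1460 = 5840 bytes (assuming WTX_MAX_LEN exceeds
        		 * that), so ceil(buffer len / MSS) never exceeds the
        		 * 4 the hardware assumes.
        		 */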
   7542 		dmamap->dm_maxsegsz =
   7543 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7544 		    ? m0->m_pkthdr.segsz << 2
   7545 		    : WTX_MAX_LEN;
   7546 
   7547 		/*
   7548 		 * Load the DMA map.  If this fails, the packet either
   7549 		 * didn't fit in the allotted number of segments, or we
   7550 		 * were short on resources.  For the too-many-segments
   7551 		 * case, we simply report an error and drop the packet,
   7552 		 * since we can't sanely copy a jumbo packet to a single
   7553 		 * buffer.
   7554 		 */
   7555 retry:
   7556 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7557 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7558 		if (__predict_false(error)) {
   7559 			if (error == EFBIG) {
   7560 				if (remap == true) {
   7561 					struct mbuf *m;
   7562 
   7563 					remap = false;
   7564 					m = m_defrag(m0, M_NOWAIT);
   7565 					if (m != NULL) {
   7566 						WM_Q_EVCNT_INCR(txq, defrag);
   7567 						m0 = m;
   7568 						goto retry;
   7569 					}
   7570 				}
   7571 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7572 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7573 				    "DMA segments, dropping...\n",
   7574 				    device_xname(sc->sc_dev));
   7575 				wm_dump_mbuf_chain(sc, m0);
   7576 				m_freem(m0);
   7577 				continue;
   7578 			}
   7579 			/* Short on resources, just stop for now. */
   7580 			DPRINTF(WM_DEBUG_TX,
   7581 			    ("%s: TX: dmamap load failed: %d\n",
   7582 				device_xname(sc->sc_dev), error));
   7583 			break;
   7584 		}
   7585 
   7586 		segs_needed = dmamap->dm_nsegs;
   7587 		if (use_tso) {
   7588 			/* For sentinel descriptor; see below. */
   7589 			segs_needed++;
   7590 		}
   7591 
   7592 		/*
   7593 		 * Ensure we have enough descriptors free to describe
   7594 		 * the packet. Note, we always reserve one descriptor
   7595 		 * at the end of the ring due to the semantics of the
   7596 		 * TDT register, plus one more in the event we need
   7597 		 * to load offload context.
   7598 		 */
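        		/*
        		 * Concretely (assumed numbers): with txq_free = 10 the
        		 * check below admits at most 8 segments; one descriptor
        		 * stays free for the TDT semantics and one for a
        		 * possible context descriptor.
        		 */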
   7599 		if (segs_needed > txq->txq_free - 2) {
   7600 			/*
   7601 			 * Not enough free descriptors to transmit this
   7602 			 * packet.  We haven't committed anything yet,
   7603 			 * so just unload the DMA map, put the packet
   7604 			 * back on the queue, and punt. Notify the upper
   7605 			 * layer that there are no more slots left.
   7606 			 */
   7607 			DPRINTF(WM_DEBUG_TX,
   7608 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7609 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7610 				segs_needed, txq->txq_free - 1));
   7611 			if (!is_transmit)
   7612 				ifp->if_flags |= IFF_OACTIVE;
   7613 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7614 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7615 			WM_Q_EVCNT_INCR(txq, txdstall);
   7616 			break;
   7617 		}
   7618 
   7619 		/*
   7620 		 * Check for 82547 Tx FIFO bug. We need to do this
   7621 		 * once we know we can transmit the packet, since we
   7622 		 * do some internal FIFO space accounting here.
   7623 		 */
   7624 		if (sc->sc_type == WM_T_82547 &&
   7625 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7626 			DPRINTF(WM_DEBUG_TX,
   7627 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7628 				device_xname(sc->sc_dev)));
   7629 			if (!is_transmit)
   7630 				ifp->if_flags |= IFF_OACTIVE;
   7631 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7632 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7633 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7634 			break;
   7635 		}
   7636 
   7637 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7638 
   7639 		DPRINTF(WM_DEBUG_TX,
   7640 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7641 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7642 
   7643 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7644 
   7645 		/*
   7646 		 * Store a pointer to the packet so that we can free it
   7647 		 * later.
   7648 		 *
   7649 		 * Initially, we consider the number of descriptors the
   7650 		 * packet uses the number of DMA segments.  This may be
   7651 		 * packet uses to be the number of DMA segments.  This may be
   7652 		 * is used to set the checksum context).
   7653 		 */
   7654 		txs->txs_mbuf = m0;
   7655 		txs->txs_firstdesc = txq->txq_next;
   7656 		txs->txs_ndesc = segs_needed;
   7657 
   7658 		/* Set up offload parameters for this packet. */
   7659 		if (m0->m_pkthdr.csum_flags &
   7660 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7661 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7662 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7663 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7664 					  &cksumfields) != 0) {
   7665 				/* Error message already displayed. */
   7666 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7667 				continue;
   7668 			}
   7669 		} else {
   7670 			cksumcmd = 0;
   7671 			cksumfields = 0;
   7672 		}
   7673 
   7674 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7675 
   7676 		/* Sync the DMA map. */
   7677 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7678 		    BUS_DMASYNC_PREWRITE);
   7679 
   7680 		/* Initialize the transmit descriptor. */
   7681 		for (nexttx = txq->txq_next, seg = 0;
   7682 		     seg < dmamap->dm_nsegs; seg++) {
   7683 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7684 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7685 			     seglen != 0;
   7686 			     curaddr += curlen, seglen -= curlen,
   7687 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7688 				curlen = seglen;
   7689 
   7690 				/*
   7691 				 * So says the Linux driver:
   7692 				 * Work around for premature descriptor
   7693 				 * write-backs in TSO mode.  Append a
   7694 				 * 4-byte sentinel descriptor.
   7695 				 */
   7696 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7697 				    curlen > 8)
   7698 					curlen -= 4;
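        				/*
        				 * The four bytes trimmed here are
        				 * emitted by a later loop iteration
        				 * as their own short descriptor: the
        				 * sentinel accounted for in
        				 * segs_needed above.
        				 */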
   7699 
   7700 				wm_set_dma_addr(
   7701 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7702 				txq->txq_descs[nexttx].wtx_cmdlen
   7703 				    = htole32(cksumcmd | curlen);
   7704 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7705 				    = 0;
   7706 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7707 				    = cksumfields;
   7708 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7709 				lasttx = nexttx;
   7710 
   7711 				DPRINTF(WM_DEBUG_TX,
   7712 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7713 					"len %#04zx\n",
   7714 					device_xname(sc->sc_dev), nexttx,
   7715 					(uint64_t)curaddr, curlen));
   7716 			}
   7717 		}
   7718 
   7719 		KASSERT(lasttx != -1);
   7720 
   7721 		/*
   7722 		 * Set up the command byte on the last descriptor of
   7723 		 * the packet. If we're in the interrupt delay window,
   7724 		 * delay the interrupt.
   7725 		 */
   7726 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7727 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7728 
   7729 		/*
   7730 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7731 		 * up the descriptor to encapsulate the packet for us.
   7732 		 *
   7733 		 * This is only valid on the last descriptor of the packet.
   7734 		 */
   7735 		if (vlan_has_tag(m0)) {
   7736 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7737 			    htole32(WTX_CMD_VLE);
   7738 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7739 			    = htole16(vlan_get_tag(m0));
   7740 		}
   7741 
   7742 		txs->txs_lastdesc = lasttx;
   7743 
   7744 		DPRINTF(WM_DEBUG_TX,
   7745 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7746 			device_xname(sc->sc_dev),
   7747 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7748 
   7749 		/* Sync the descriptors we're using. */
   7750 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7751 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7752 
   7753 		/* Give the packet to the chip. */
   7754 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7755 
   7756 		DPRINTF(WM_DEBUG_TX,
   7757 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7758 
   7759 		DPRINTF(WM_DEBUG_TX,
   7760 		    ("%s: TX: finished transmitting packet, job %d\n",
   7761 			device_xname(sc->sc_dev), txq->txq_snext));
   7762 
   7763 		/* Advance the tx pointer. */
   7764 		txq->txq_free -= txs->txs_ndesc;
   7765 		txq->txq_next = nexttx;
   7766 
   7767 		txq->txq_sfree--;
   7768 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7769 
   7770 		/* Pass the packet to any BPF listeners. */
   7771 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7772 	}
   7773 
   7774 	if (m0 != NULL) {
   7775 		if (!is_transmit)
   7776 			ifp->if_flags |= IFF_OACTIVE;
   7777 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7778 		WM_Q_EVCNT_INCR(txq, descdrop);
   7779 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7780 			__func__));
   7781 		m_freem(m0);
   7782 	}
   7783 
   7784 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7785 		/* No more slots; notify upper layer. */
   7786 		if (!is_transmit)
   7787 			ifp->if_flags |= IFF_OACTIVE;
   7788 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7789 	}
   7790 
   7791 	if (txq->txq_free != ofree) {
   7792 		/* Set a watchdog timer in case the chip flakes out. */
   7793 		txq->txq_lastsent = time_uptime;
   7794 		txq->txq_sending = true;
   7795 	}
   7796 }
   7797 
   7798 /*
   7799  * wm_nq_tx_offload:
   7800  *
   7801  *	Set up TCP/IP checksumming parameters for the
   7802  *	specified packet, for NEWQUEUE devices
   7803  */
   7804 static int
   7805 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7806     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7807 {
   7808 	struct mbuf *m0 = txs->txs_mbuf;
   7809 	uint32_t vl_len, mssidx, cmdc;
   7810 	struct ether_header *eh;
   7811 	int offset, iphl;
   7812 
   7813 	/*
   7814 	 * XXX It would be nice if the mbuf pkthdr had offset
   7815 	 * fields for the protocol headers.
   7816 	 */
   7817 	*cmdlenp = 0;
   7818 	*fieldsp = 0;
   7819 
   7820 	eh = mtod(m0, struct ether_header *);
   7821 	switch (htons(eh->ether_type)) {
   7822 	case ETHERTYPE_IP:
   7823 	case ETHERTYPE_IPV6:
   7824 		offset = ETHER_HDR_LEN;
   7825 		break;
   7826 
   7827 	case ETHERTYPE_VLAN:
   7828 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7829 		break;
   7830 
   7831 	default:
   7832 		/* Don't support this protocol or encapsulation. */
   7833 		*do_csum = false;
   7834 		return 0;
   7835 	}
   7836 	*do_csum = true;
   7837 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7838 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7839 
   7840 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7841 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7842 
   7843 	if ((m0->m_pkthdr.csum_flags &
   7844 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7845 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7846 	} else {
   7847 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7848 	}
   7849 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7850 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7851 
   7852 	if (vlan_has_tag(m0)) {
   7853 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7854 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7855 		*cmdlenp |= NQTX_CMD_VLE;
   7856 	}
   7857 
   7858 	mssidx = 0;
   7859 
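         	/*
         	 * For TSO, the chip rewrites the IP length field and the TCP
         	 * checksum for every segment it emits, so the code below
         	 * zeroes ip_len/ip6_plen and seeds th_sum with the
         	 * pseudo-header checksum computed without the payload length.
         	 */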
   7860 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7861 		int hlen = offset + iphl;
   7862 		int tcp_hlen;
   7863 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7864 
   7865 		if (__predict_false(m0->m_len <
   7866 				    (hlen + sizeof(struct tcphdr)))) {
   7867 			/*
   7868 			 * TCP/IP headers are not in the first mbuf; we need
   7869 			 * to do this the slow and painful way. Let's just
   7870 			 * hope this doesn't happen very often.
   7871 			 */
   7872 			struct tcphdr th;
   7873 
   7874 			WM_Q_EVCNT_INCR(txq, tsopain);
   7875 
   7876 			m_copydata(m0, hlen, sizeof(th), &th);
   7877 			if (v4) {
   7878 				struct ip ip;
   7879 
   7880 				m_copydata(m0, offset, sizeof(ip), &ip);
   7881 				ip.ip_len = 0;
   7882 				m_copyback(m0,
   7883 				    offset + offsetof(struct ip, ip_len),
   7884 				    sizeof(ip.ip_len), &ip.ip_len);
   7885 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7886 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7887 			} else {
   7888 				struct ip6_hdr ip6;
   7889 
   7890 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7891 				ip6.ip6_plen = 0;
   7892 				m_copyback(m0,
   7893 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7894 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7895 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7896 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7897 			}
   7898 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7899 			    sizeof(th.th_sum), &th.th_sum);
   7900 
   7901 			tcp_hlen = th.th_off << 2;
   7902 		} else {
   7903 			/*
   7904 			 * TCP/IP headers are in the first mbuf; we can do
   7905 			 * this the easy way.
   7906 			 */
   7907 			struct tcphdr *th;
   7908 
   7909 			if (v4) {
   7910 				struct ip *ip =
   7911 				    (void *)(mtod(m0, char *) + offset);
   7912 				th = (void *)(mtod(m0, char *) + hlen);
   7913 
   7914 				ip->ip_len = 0;
   7915 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7916 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7917 			} else {
   7918 				struct ip6_hdr *ip6 =
   7919 				    (void *)(mtod(m0, char *) + offset);
   7920 				th = (void *)(mtod(m0, char *) + hlen);
   7921 
   7922 				ip6->ip6_plen = 0;
   7923 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7924 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7925 			}
   7926 			tcp_hlen = th->th_off << 2;
   7927 		}
   7928 		hlen += tcp_hlen;
   7929 		*cmdlenp |= NQTX_CMD_TSE;
   7930 
   7931 		if (v4) {
   7932 			WM_Q_EVCNT_INCR(txq, tso);
   7933 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7934 		} else {
   7935 			WM_Q_EVCNT_INCR(txq, tso6);
   7936 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7937 		}
   7938 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7939 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7940 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7941 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7942 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7943 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7944 	} else {
   7945 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7946 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7947 	}
   7948 
   7949 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7950 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7951 		cmdc |= NQTXC_CMD_IP4;
   7952 	}
   7953 
   7954 	if (m0->m_pkthdr.csum_flags &
   7955 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7956 		WM_Q_EVCNT_INCR(txq, tusum);
   7957 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7958 			cmdc |= NQTXC_CMD_TCP;
   7959 		else
   7960 			cmdc |= NQTXC_CMD_UDP;
   7961 
   7962 		cmdc |= NQTXC_CMD_IP4;
   7963 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7964 	}
   7965 	if (m0->m_pkthdr.csum_flags &
   7966 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7967 		WM_Q_EVCNT_INCR(txq, tusum6);
   7968 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7969 			cmdc |= NQTXC_CMD_TCP;
   7970 		else
   7971 			cmdc |= NQTXC_CMD_UDP;
   7972 
   7973 		cmdc |= NQTXC_CMD_IP6;
   7974 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7975 	}
   7976 
    7977 	/*
    7978 	 * We don't have to write a context descriptor for every packet on
    7979 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7980 	 * I354, I210 and I211; writing one per Tx queue would be enough
    7981 	 * for these controllers.
    7982 	 * Writing a context descriptor for every packet is a small
    7983 	 * overhead, but it does not cause problems.
    7984 	 */
   7985 	/* Fill in the context descriptor. */
   7986 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7987 	    htole32(vl_len);
   7988 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7989 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7990 	    htole32(cmdc);
   7991 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7992 	    htole32(mssidx);
   7993 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7994 	DPRINTF(WM_DEBUG_TX,
   7995 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7996 		txq->txq_next, 0, vl_len));
   7997 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7998 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7999 	txs->txs_ndesc++;
   8000 	return 0;
   8001 }
   8002 
   8003 /*
   8004  * wm_nq_start:		[ifnet interface function]
   8005  *
   8006  *	Start packet transmission on the interface for NEWQUEUE devices
   8007  */
   8008 static void
   8009 wm_nq_start(struct ifnet *ifp)
   8010 {
   8011 	struct wm_softc *sc = ifp->if_softc;
   8012 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8013 
   8014 #ifdef WM_MPSAFE
   8015 	KASSERT(if_is_mpsafe(ifp));
   8016 #endif
   8017 	/*
   8018 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8019 	 */
   8020 
   8021 	mutex_enter(txq->txq_lock);
   8022 	if (!txq->txq_stopping)
   8023 		wm_nq_start_locked(ifp);
   8024 	mutex_exit(txq->txq_lock);
   8025 }
   8026 
   8027 static void
   8028 wm_nq_start_locked(struct ifnet *ifp)
   8029 {
   8030 	struct wm_softc *sc = ifp->if_softc;
   8031 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8032 
   8033 	wm_nq_send_common_locked(ifp, txq, false);
   8034 }
   8035 
   8036 static int
   8037 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8038 {
   8039 	int qid;
   8040 	struct wm_softc *sc = ifp->if_softc;
   8041 	struct wm_txqueue *txq;
   8042 
   8043 	qid = wm_select_txqueue(ifp, m);
   8044 	txq = &sc->sc_queue[qid].wmq_txq;
   8045 
   8046 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8047 		m_freem(m);
   8048 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8049 		return ENOBUFS;
   8050 	}
   8051 
   8052 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   8053 	ifp->if_obytes += m->m_pkthdr.len;
   8054 	if (m->m_flags & M_MCAST)
   8055 		ifp->if_omcasts++;
   8056 
    8057 	/*
    8058 	 * This mutex_tryenter() can fail at run time in two
    8059 	 * situations:
    8060 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8061 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    8062 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
    8063 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8064 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
    8065 	 * likewise dequeued by wm_deferred_start_locked(), so it does not
    8066 	 * get stuck either.
    8067 	 */
   8068 	if (mutex_tryenter(txq->txq_lock)) {
   8069 		if (!txq->txq_stopping)
   8070 			wm_nq_transmit_locked(ifp, txq);
   8071 		mutex_exit(txq->txq_lock);
   8072 	}
   8073 
   8074 	return 0;
   8075 }
   8076 
   8077 static void
   8078 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8079 {
   8080 
   8081 	wm_nq_send_common_locked(ifp, txq, true);
   8082 }
   8083 
   8084 static void
   8085 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8086     bool is_transmit)
   8087 {
   8088 	struct wm_softc *sc = ifp->if_softc;
   8089 	struct mbuf *m0;
   8090 	struct wm_txsoft *txs;
   8091 	bus_dmamap_t dmamap;
   8092 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8093 	bool do_csum, sent;
   8094 	bool remap = true;
   8095 
   8096 	KASSERT(mutex_owned(txq->txq_lock));
   8097 
   8098 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8099 		return;
   8100 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8101 		return;
   8102 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8103 		return;
   8104 
   8105 	sent = false;
   8106 
   8107 	/*
   8108 	 * Loop through the send queue, setting up transmit descriptors
   8109 	 * until we drain the queue, or use up all available transmit
   8110 	 * descriptors.
   8111 	 */
   8112 	for (;;) {
   8113 		m0 = NULL;
   8114 
   8115 		/* Get a work queue entry. */
   8116 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8117 			wm_txeof(txq, UINT_MAX);
   8118 			if (txq->txq_sfree == 0) {
   8119 				DPRINTF(WM_DEBUG_TX,
   8120 				    ("%s: TX: no free job descriptors\n",
   8121 					device_xname(sc->sc_dev)));
   8122 				WM_Q_EVCNT_INCR(txq, txsstall);
   8123 				break;
   8124 			}
   8125 		}
   8126 
   8127 		/* Grab a packet off the queue. */
   8128 		if (is_transmit)
   8129 			m0 = pcq_get(txq->txq_interq);
   8130 		else
   8131 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8132 		if (m0 == NULL)
   8133 			break;
   8134 
   8135 		DPRINTF(WM_DEBUG_TX,
   8136 		    ("%s: TX: have packet to transmit: %p\n",
   8137 		    device_xname(sc->sc_dev), m0));
   8138 
   8139 		txs = &txq->txq_soft[txq->txq_snext];
   8140 		dmamap = txs->txs_dmamap;
   8141 
    8142 		/*
    8143 		 * Load the DMA map.  If this fails, the packet either
    8144 		 * didn't fit in the allotted number of segments, or we
    8145 		 * were short on resources.  In the too-many-segments
    8146 		 * case we try m_defrag() once; if that also fails, we
    8147 		 * report an error and drop the packet, since we can't
    8148 		 * sanely copy a jumbo packet to a single buffer.
    8149 		 */
   8150 retry:
   8151 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8152 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8153 		if (__predict_false(error)) {
   8154 			if (error == EFBIG) {
   8155 				if (remap == true) {
   8156 					struct mbuf *m;
   8157 
   8158 					remap = false;
   8159 					m = m_defrag(m0, M_NOWAIT);
   8160 					if (m != NULL) {
   8161 						WM_Q_EVCNT_INCR(txq, defrag);
   8162 						m0 = m;
   8163 						goto retry;
   8164 					}
   8165 				}
   8166 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8167 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8168 				    "DMA segments, dropping...\n",
   8169 				    device_xname(sc->sc_dev));
   8170 				wm_dump_mbuf_chain(sc, m0);
   8171 				m_freem(m0);
   8172 				continue;
   8173 			}
   8174 			/* Short on resources, just stop for now. */
   8175 			DPRINTF(WM_DEBUG_TX,
   8176 			    ("%s: TX: dmamap load failed: %d\n",
   8177 				device_xname(sc->sc_dev), error));
   8178 			break;
   8179 		}
   8180 
   8181 		segs_needed = dmamap->dm_nsegs;
   8182 
   8183 		/*
   8184 		 * Ensure we have enough descriptors free to describe
   8185 		 * the packet. Note, we always reserve one descriptor
   8186 		 * at the end of the ring due to the semantics of the
   8187 		 * TDT register, plus one more in the event we need
   8188 		 * to load offload context.
   8189 		 */
   8190 		if (segs_needed > txq->txq_free - 2) {
    8191 			/*
    8192 			 * Not enough free descriptors to transmit this
    8193 			 * packet.  We haven't committed anything yet,
    8194 			 * so just unload the DMA map, drop the packet,
    8195 			 * and punt.  Notify the upper layer that there
    8196 			 * are no more slots left.
    8197 			 */
   8198 			DPRINTF(WM_DEBUG_TX,
   8199 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8200 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8201 				segs_needed, txq->txq_free - 1));
   8202 			if (!is_transmit)
   8203 				ifp->if_flags |= IFF_OACTIVE;
   8204 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8205 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8206 			WM_Q_EVCNT_INCR(txq, txdstall);
   8207 			break;
   8208 		}
   8209 
   8210 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8211 
   8212 		DPRINTF(WM_DEBUG_TX,
   8213 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8214 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8215 
   8216 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8217 
   8218 		/*
   8219 		 * Store a pointer to the packet so that we can free it
   8220 		 * later.
   8221 		 *
   8222 		 * Initially, we consider the number of descriptors the
   8223 		 * packet uses the number of DMA segments.  This may be
   8224 		 * incremented by 1 if we do checksum offload (a descriptor
   8225 		 * is used to set the checksum context).
   8226 		 */
   8227 		txs->txs_mbuf = m0;
   8228 		txs->txs_firstdesc = txq->txq_next;
   8229 		txs->txs_ndesc = segs_needed;
   8230 
   8231 		/* Set up offload parameters for this packet. */
   8232 		uint32_t cmdlen, fields, dcmdlen;
   8233 		if (m0->m_pkthdr.csum_flags &
   8234 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8235 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8236 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8237 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8238 			    &do_csum) != 0) {
   8239 				/* Error message already displayed. */
   8240 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8241 				continue;
   8242 			}
   8243 		} else {
   8244 			do_csum = false;
   8245 			cmdlen = 0;
   8246 			fields = 0;
   8247 		}
   8248 
   8249 		/* Sync the DMA map. */
   8250 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8251 		    BUS_DMASYNC_PREWRITE);
   8252 
   8253 		/* Initialize the first transmit descriptor. */
   8254 		nexttx = txq->txq_next;
   8255 		if (!do_csum) {
   8256 			/* Setup a legacy descriptor */
   8257 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8258 			    dmamap->dm_segs[0].ds_addr);
   8259 			txq->txq_descs[nexttx].wtx_cmdlen =
   8260 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8261 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8262 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8263 			if (vlan_has_tag(m0)) {
   8264 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8265 				    htole32(WTX_CMD_VLE);
   8266 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8267 				    htole16(vlan_get_tag(m0));
   8268 			} else
    8269 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8270 
   8271 			dcmdlen = 0;
   8272 		} else {
   8273 			/* Setup an advanced data descriptor */
   8274 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8275 			    htole64(dmamap->dm_segs[0].ds_addr);
   8276 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8277 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8278 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8279 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8280 			    htole32(fields);
   8281 			DPRINTF(WM_DEBUG_TX,
   8282 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8283 				device_xname(sc->sc_dev), nexttx,
   8284 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8285 			DPRINTF(WM_DEBUG_TX,
   8286 			    ("\t 0x%08x%08x\n", fields,
   8287 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8288 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8289 		}
   8290 
   8291 		lasttx = nexttx;
   8292 		nexttx = WM_NEXTTX(txq, nexttx);
    8293 		/*
    8294 		 * Fill in the remaining descriptors; the legacy and
    8295 		 * advanced formats are laid out the same from here on.
    8296 		 */
   8297 		for (seg = 1; seg < dmamap->dm_nsegs;
   8298 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8299 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8300 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8301 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8302 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8303 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8304 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8305 			lasttx = nexttx;
   8306 
   8307 			DPRINTF(WM_DEBUG_TX,
   8308 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8309 				device_xname(sc->sc_dev), nexttx,
   8310 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8311 				dmamap->dm_segs[seg].ds_len));
   8312 		}
   8313 
   8314 		KASSERT(lasttx != -1);
   8315 
   8316 		/*
   8317 		 * Set up the command byte on the last descriptor of
   8318 		 * the packet. If we're in the interrupt delay window,
   8319 		 * delay the interrupt.
   8320 		 */
   8321 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8322 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8323 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8324 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8325 
   8326 		txs->txs_lastdesc = lasttx;
   8327 
   8328 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8329 		    device_xname(sc->sc_dev),
   8330 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8331 
   8332 		/* Sync the descriptors we're using. */
   8333 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8334 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8335 
   8336 		/* Give the packet to the chip. */
   8337 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8338 		sent = true;
   8339 
   8340 		DPRINTF(WM_DEBUG_TX,
   8341 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8342 
   8343 		DPRINTF(WM_DEBUG_TX,
   8344 		    ("%s: TX: finished transmitting packet, job %d\n",
   8345 			device_xname(sc->sc_dev), txq->txq_snext));
   8346 
   8347 		/* Advance the tx pointer. */
   8348 		txq->txq_free -= txs->txs_ndesc;
   8349 		txq->txq_next = nexttx;
   8350 
   8351 		txq->txq_sfree--;
   8352 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8353 
   8354 		/* Pass the packet to any BPF listeners. */
   8355 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8356 	}
   8357 
   8358 	if (m0 != NULL) {
   8359 		if (!is_transmit)
   8360 			ifp->if_flags |= IFF_OACTIVE;
   8361 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8362 		WM_Q_EVCNT_INCR(txq, descdrop);
   8363 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8364 			__func__));
   8365 		m_freem(m0);
   8366 	}
   8367 
   8368 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8369 		/* No more slots; notify upper layer. */
   8370 		if (!is_transmit)
   8371 			ifp->if_flags |= IFF_OACTIVE;
   8372 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8373 	}
   8374 
   8375 	if (sent) {
   8376 		/* Set a watchdog timer in case the chip flakes out. */
   8377 		txq->txq_lastsent = time_uptime;
   8378 		txq->txq_sending = true;
   8379 	}
   8380 }
   8381 
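         /*
          * wm_deferred_start_locked:
          *
          *	Restart packet transmission deferred from the interrupt path;
          *	called from wm_handle_queue() with txq_lock held.
          */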
   8382 static void
   8383 wm_deferred_start_locked(struct wm_txqueue *txq)
   8384 {
   8385 	struct wm_softc *sc = txq->txq_sc;
   8386 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8387 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8388 	int qid = wmq->wmq_id;
   8389 
   8390 	KASSERT(mutex_owned(txq->txq_lock));
   8391 
   8392 	if (txq->txq_stopping) {
   8393 		mutex_exit(txq->txq_lock);
   8394 		return;
   8395 	}
   8396 
   8397 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8398 		/* XXX needed for ALTQ or single-CPU systems */
   8399 		if (qid == 0)
   8400 			wm_nq_start_locked(ifp);
   8401 		wm_nq_transmit_locked(ifp, txq);
   8402 	} else {
    8403 		/* XXX needed for ALTQ or single-CPU systems */
   8404 		if (qid == 0)
   8405 			wm_start_locked(ifp);
   8406 		wm_transmit_locked(ifp, txq);
   8407 	}
   8408 }
   8409 
   8410 /* Interrupt */
   8411 
   8412 /*
   8413  * wm_txeof:
   8414  *
   8415  *	Helper; handle transmit interrupts.
   8416  */
   8417 static bool
   8418 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8419 {
   8420 	struct wm_softc *sc = txq->txq_sc;
   8421 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8422 	struct wm_txsoft *txs;
   8423 	int count = 0;
   8424 	int i;
   8425 	uint8_t status;
   8426 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8427 	bool more = false;
   8428 
   8429 	KASSERT(mutex_owned(txq->txq_lock));
   8430 
   8431 	if (txq->txq_stopping)
   8432 		return false;
   8433 
   8434 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8435 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8436 	if (wmq->wmq_id == 0)
   8437 		ifp->if_flags &= ~IFF_OACTIVE;
   8438 
   8439 	/*
   8440 	 * Go through the Tx list and free mbufs for those
   8441 	 * frames which have been transmitted.
   8442 	 */
   8443 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8444 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8445 		if (limit-- == 0) {
   8446 			more = true;
   8447 			DPRINTF(WM_DEBUG_TX,
   8448 			    ("%s: TX: loop limited, job %d is not processed\n",
   8449 				device_xname(sc->sc_dev), i));
   8450 			break;
   8451 		}
   8452 
   8453 		txs = &txq->txq_soft[i];
   8454 
   8455 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8456 			device_xname(sc->sc_dev), i));
   8457 
   8458 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8459 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8460 
   8461 		status =
   8462 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
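         		/* Not yet done; re-sync the descriptor and stop scanning. */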
   8463 		if ((status & WTX_ST_DD) == 0) {
   8464 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8465 			    BUS_DMASYNC_PREREAD);
   8466 			break;
   8467 		}
   8468 
   8469 		count++;
   8470 		DPRINTF(WM_DEBUG_TX,
   8471 		    ("%s: TX: job %d done: descs %d..%d\n",
   8472 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8473 		    txs->txs_lastdesc));
   8474 
   8475 		/*
   8476 		 * XXX We should probably be using the statistics
   8477 		 * XXX registers, but I don't know if they exist
   8478 		 * XXX on chips before the i82544.
   8479 		 */
   8480 
   8481 #ifdef WM_EVENT_COUNTERS
   8482 		if (status & WTX_ST_TU)
   8483 			WM_Q_EVCNT_INCR(txq, underrun);
   8484 #endif /* WM_EVENT_COUNTERS */
   8485 
    8486 		/*
    8487 		 * The 82574 and newer documents say the status field has
    8488 		 * neither an EC (Excessive Collision) bit nor an LC (Late
    8489 		 * Collision) bit; both are reserved.  Refer to the "PCIe GbE
    8490 		 * Controller Open Source Software Developer's Manual" and
    8491 		 * the 82574 (and newer) datasheets.
    8492 		 *
    8493 		 * XXX The LC bit has been observed set on an I218 even though
    8494 		 * the media was full duplex, so the bit might have some other
    8495 		 * meaning (no documentation is available).
    8496 		 */
   8496 
   8497 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8498 		    && ((sc->sc_type < WM_T_82574)
   8499 			|| (sc->sc_type == WM_T_80003))) {
   8500 			ifp->if_oerrors++;
   8501 			if (status & WTX_ST_LC)
   8502 				log(LOG_WARNING, "%s: late collision\n",
   8503 				    device_xname(sc->sc_dev));
   8504 			else if (status & WTX_ST_EC) {
   8505 				ifp->if_collisions +=
   8506 				    TX_COLLISION_THRESHOLD + 1;
   8507 				log(LOG_WARNING, "%s: excessive collisions\n",
   8508 				    device_xname(sc->sc_dev));
   8509 			}
   8510 		} else
   8511 			ifp->if_opackets++;
   8512 
   8513 		txq->txq_packets++;
   8514 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8515 
   8516 		txq->txq_free += txs->txs_ndesc;
   8517 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8518 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8519 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8520 		m_freem(txs->txs_mbuf);
   8521 		txs->txs_mbuf = NULL;
   8522 	}
   8523 
   8524 	/* Update the dirty transmit buffer pointer. */
   8525 	txq->txq_sdirty = i;
   8526 	DPRINTF(WM_DEBUG_TX,
   8527 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8528 
   8529 	if (count != 0)
   8530 		rnd_add_uint32(&sc->rnd_source, count);
   8531 
   8532 	/*
   8533 	 * If there are no more pending transmissions, cancel the watchdog
   8534 	 * timer.
   8535 	 */
   8536 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8537 		txq->txq_sending = false;
   8538 
   8539 	return more;
   8540 }
   8541 
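         /*
          * Receive descriptor accessors.
          *
          * Three descriptor layouts are handled: the legacy format, the
          * 82574 extended format and the NEWQUEUE (82575 and newer)
          * advanced format.  The helpers below hide the differences from
          * wm_rxeof().
          */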
   8542 static inline uint32_t
   8543 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8544 {
   8545 	struct wm_softc *sc = rxq->rxq_sc;
   8546 
   8547 	if (sc->sc_type == WM_T_82574)
   8548 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8549 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8550 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8551 	else
   8552 		return rxq->rxq_descs[idx].wrx_status;
   8553 }
   8554 
   8555 static inline uint32_t
   8556 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8557 {
   8558 	struct wm_softc *sc = rxq->rxq_sc;
   8559 
   8560 	if (sc->sc_type == WM_T_82574)
   8561 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8562 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8563 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8564 	else
   8565 		return rxq->rxq_descs[idx].wrx_errors;
   8566 }
   8567 
   8568 static inline uint16_t
   8569 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8570 {
   8571 	struct wm_softc *sc = rxq->rxq_sc;
   8572 
   8573 	if (sc->sc_type == WM_T_82574)
   8574 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8575 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8576 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8577 	else
   8578 		return rxq->rxq_descs[idx].wrx_special;
   8579 }
   8580 
   8581 static inline int
   8582 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8583 {
   8584 	struct wm_softc *sc = rxq->rxq_sc;
   8585 
   8586 	if (sc->sc_type == WM_T_82574)
   8587 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8588 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8589 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8590 	else
   8591 		return rxq->rxq_descs[idx].wrx_len;
   8592 }
   8593 
   8594 #ifdef WM_DEBUG
   8595 static inline uint32_t
   8596 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8597 {
   8598 	struct wm_softc *sc = rxq->rxq_sc;
   8599 
   8600 	if (sc->sc_type == WM_T_82574)
   8601 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8602 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8603 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8604 	else
   8605 		return 0;
   8606 }
   8607 
   8608 static inline uint8_t
   8609 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8610 {
   8611 	struct wm_softc *sc = rxq->rxq_sc;
   8612 
   8613 	if (sc->sc_type == WM_T_82574)
   8614 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8615 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8616 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8617 	else
   8618 		return 0;
   8619 }
   8620 #endif /* WM_DEBUG */
   8621 
   8622 static inline bool
   8623 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8624     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8625 {
   8626 
   8627 	if (sc->sc_type == WM_T_82574)
   8628 		return (status & ext_bit) != 0;
   8629 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8630 		return (status & nq_bit) != 0;
   8631 	else
   8632 		return (status & legacy_bit) != 0;
   8633 }
   8634 
   8635 static inline bool
   8636 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8637     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8638 {
   8639 
   8640 	if (sc->sc_type == WM_T_82574)
   8641 		return (error & ext_bit) != 0;
   8642 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8643 		return (error & nq_bit) != 0;
   8644 	else
   8645 		return (error & legacy_bit) != 0;
   8646 }
   8647 
   8648 static inline bool
   8649 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8650 {
   8651 
   8652 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8653 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8654 		return true;
   8655 	else
   8656 		return false;
   8657 }
   8658 
   8659 static inline bool
   8660 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8661 {
   8662 	struct wm_softc *sc = rxq->rxq_sc;
   8663 
   8664 	/* XXX missing error bit for newqueue? */
   8665 	if (wm_rxdesc_is_set_error(sc, errors,
   8666 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8667 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8668 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8669 		NQRXC_ERROR_RXE)) {
   8670 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8671 		    EXTRXC_ERROR_SE, 0))
   8672 			log(LOG_WARNING, "%s: symbol error\n",
   8673 			    device_xname(sc->sc_dev));
   8674 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8675 		    EXTRXC_ERROR_SEQ, 0))
   8676 			log(LOG_WARNING, "%s: receive sequence error\n",
   8677 			    device_xname(sc->sc_dev));
   8678 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8679 		    EXTRXC_ERROR_CE, 0))
   8680 			log(LOG_WARNING, "%s: CRC error\n",
   8681 			    device_xname(sc->sc_dev));
   8682 		return true;
   8683 	}
   8684 
   8685 	return false;
   8686 }
   8687 
   8688 static inline bool
   8689 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8690 {
   8691 	struct wm_softc *sc = rxq->rxq_sc;
   8692 
   8693 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8694 		NQRXC_STATUS_DD)) {
   8695 		/* We have processed all of the receive descriptors. */
   8696 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8697 		return false;
   8698 	}
   8699 
   8700 	return true;
   8701 }
   8702 
   8703 static inline bool
   8704 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8705     uint16_t vlantag, struct mbuf *m)
   8706 {
   8707 
   8708 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8709 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8710 		vlan_set_tag(m, le16toh(vlantag));
   8711 	}
   8712 
   8713 	return true;
   8714 }
   8715 
   8716 static inline void
   8717 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8718     uint32_t errors, struct mbuf *m)
   8719 {
   8720 	struct wm_softc *sc = rxq->rxq_sc;
   8721 
   8722 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8723 		if (wm_rxdesc_is_set_status(sc, status,
   8724 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8725 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8726 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8727 			if (wm_rxdesc_is_set_error(sc, errors,
   8728 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8729 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8730 		}
   8731 		if (wm_rxdesc_is_set_status(sc, status,
   8732 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8733 			/*
   8734 			 * Note: we don't know if this was TCP or UDP,
   8735 			 * so we just set both bits, and expect the
   8736 			 * upper layers to deal.
   8737 			 */
   8738 			WM_Q_EVCNT_INCR(rxq, tusum);
   8739 			m->m_pkthdr.csum_flags |=
   8740 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8741 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8742 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8743 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8744 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8745 		}
   8746 	}
   8747 }
   8748 
   8749 /*
   8750  * wm_rxeof:
   8751  *
   8752  *	Helper; handle receive interrupts.
   8753  */
   8754 static bool
   8755 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8756 {
   8757 	struct wm_softc *sc = rxq->rxq_sc;
   8758 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8759 	struct wm_rxsoft *rxs;
   8760 	struct mbuf *m;
   8761 	int i, len;
   8762 	int count = 0;
   8763 	uint32_t status, errors;
   8764 	uint16_t vlantag;
   8765 	bool more = false;
   8766 
   8767 	KASSERT(mutex_owned(rxq->rxq_lock));
   8768 
   8769 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8770 		if (limit-- == 0) {
   8771 			rxq->rxq_ptr = i;
   8772 			more = true;
   8773 			DPRINTF(WM_DEBUG_RX,
   8774 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8775 				device_xname(sc->sc_dev), i));
   8776 			break;
   8777 		}
   8778 
   8779 		rxs = &rxq->rxq_soft[i];
   8780 
   8781 		DPRINTF(WM_DEBUG_RX,
   8782 		    ("%s: RX: checking descriptor %d\n",
   8783 			device_xname(sc->sc_dev), i));
   8784 		wm_cdrxsync(rxq, i,
   8785 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8786 
   8787 		status = wm_rxdesc_get_status(rxq, i);
   8788 		errors = wm_rxdesc_get_errors(rxq, i);
   8789 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8790 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8791 #ifdef WM_DEBUG
   8792 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8793 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8794 #endif
   8795 
   8796 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8797 			/*
    8798 			 * Update the receive pointer while holding rxq_lock,
    8799 			 * consistent with the counter increments.
    8800 			 */
   8801 			rxq->rxq_ptr = i;
   8802 			break;
   8803 		}
   8804 
   8805 		count++;
   8806 		if (__predict_false(rxq->rxq_discard)) {
   8807 			DPRINTF(WM_DEBUG_RX,
   8808 			    ("%s: RX: discarding contents of descriptor %d\n",
   8809 				device_xname(sc->sc_dev), i));
   8810 			wm_init_rxdesc(rxq, i);
   8811 			if (wm_rxdesc_is_eop(rxq, status)) {
   8812 				/* Reset our state. */
   8813 				DPRINTF(WM_DEBUG_RX,
   8814 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8815 					device_xname(sc->sc_dev)));
   8816 				rxq->rxq_discard = 0;
   8817 			}
   8818 			continue;
   8819 		}
   8820 
   8821 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8822 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8823 
   8824 		m = rxs->rxs_mbuf;
   8825 
   8826 		/*
   8827 		 * Add a new receive buffer to the ring, unless of
   8828 		 * course the length is zero. Treat the latter as a
   8829 		 * failed mapping.
   8830 		 */
   8831 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8832 			/*
   8833 			 * Failed, throw away what we've done so
   8834 			 * far, and discard the rest of the packet.
   8835 			 */
   8836 			ifp->if_ierrors++;
   8837 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8838 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8839 			wm_init_rxdesc(rxq, i);
   8840 			if (!wm_rxdesc_is_eop(rxq, status))
   8841 				rxq->rxq_discard = 1;
   8842 			if (rxq->rxq_head != NULL)
   8843 				m_freem(rxq->rxq_head);
   8844 			WM_RXCHAIN_RESET(rxq);
   8845 			DPRINTF(WM_DEBUG_RX,
   8846 			    ("%s: RX: Rx buffer allocation failed, "
   8847 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8848 				rxq->rxq_discard ? " (discard)" : ""));
   8849 			continue;
   8850 		}
   8851 
   8852 		m->m_len = len;
   8853 		rxq->rxq_len += len;
   8854 		DPRINTF(WM_DEBUG_RX,
   8855 		    ("%s: RX: buffer at %p len %d\n",
   8856 			device_xname(sc->sc_dev), m->m_data, len));
   8857 
   8858 		/* If this is not the end of the packet, keep looking. */
   8859 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8860 			WM_RXCHAIN_LINK(rxq, m);
   8861 			DPRINTF(WM_DEBUG_RX,
   8862 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8863 				device_xname(sc->sc_dev), rxq->rxq_len));
   8864 			continue;
   8865 		}
   8866 
    8867 		/*
    8868 		 * Okay, we have the entire packet now.  The chip is
    8869 		 * configured to include the FCS except on the I350, I354
    8870 		 * and I21[01] (not all chips can be configured to strip
    8871 		 * it), so we need to trim it here.  We may also need to
    8872 		 * adjust the length of the previous mbuf in the chain if
    8873 		 * the current mbuf is too short.
    8874 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    8875 		 * is always set on the I350, so we don't trim there.
    8876 		 */
   8877 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8878 		    && (sc->sc_type != WM_T_I210)
   8879 		    && (sc->sc_type != WM_T_I211)) {
   8880 			if (m->m_len < ETHER_CRC_LEN) {
   8881 				rxq->rxq_tail->m_len
   8882 				    -= (ETHER_CRC_LEN - m->m_len);
   8883 				m->m_len = 0;
   8884 			} else
   8885 				m->m_len -= ETHER_CRC_LEN;
   8886 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8887 		} else
   8888 			len = rxq->rxq_len;
   8889 
   8890 		WM_RXCHAIN_LINK(rxq, m);
   8891 
   8892 		*rxq->rxq_tailp = NULL;
   8893 		m = rxq->rxq_head;
   8894 
   8895 		WM_RXCHAIN_RESET(rxq);
   8896 
   8897 		DPRINTF(WM_DEBUG_RX,
   8898 		    ("%s: RX: have entire packet, len -> %d\n",
   8899 			device_xname(sc->sc_dev), len));
   8900 
   8901 		/* If an error occurred, update stats and drop the packet. */
   8902 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8903 			m_freem(m);
   8904 			continue;
   8905 		}
   8906 
   8907 		/* No errors.  Receive the packet. */
   8908 		m_set_rcvif(m, ifp);
   8909 		m->m_pkthdr.len = len;
    8910 		/*
    8911 		 * TODO
    8912 		 * The rsshash and rsstype should be saved in this mbuf.
    8913 		 */
   8914 		DPRINTF(WM_DEBUG_RX,
   8915 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8916 			device_xname(sc->sc_dev), rsstype, rsshash));
   8917 
   8918 		/*
   8919 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8920 		 * for us.  Associate the tag with the packet.
   8921 		 */
   8922 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8923 			continue;
   8924 
   8925 		/* Set up checksum info for this packet. */
   8926 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8927 		/*
    8928 		 * Update the receive pointer while holding rxq_lock,
    8929 		 * consistent with the counter increments.
    8930 		 */
   8931 		rxq->rxq_ptr = i;
   8932 		rxq->rxq_packets++;
   8933 		rxq->rxq_bytes += len;
   8934 		mutex_exit(rxq->rxq_lock);
   8935 
   8936 		/* Pass it on. */
   8937 		if_percpuq_enqueue(sc->sc_ipq, m);
   8938 
   8939 		mutex_enter(rxq->rxq_lock);
   8940 
   8941 		if (rxq->rxq_stopping)
   8942 			break;
   8943 	}
   8944 
   8945 	if (count != 0)
   8946 		rnd_add_uint32(&sc->rnd_source, count);
   8947 
   8948 	DPRINTF(WM_DEBUG_RX,
   8949 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8950 
   8951 	return more;
   8952 }
   8953 
   8954 /*
   8955  * wm_linkintr_gmii:
   8956  *
   8957  *	Helper; handle link interrupts for GMII.
   8958  */
   8959 static void
   8960 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8961 {
   8962 	device_t dev = sc->sc_dev;
   8963 	uint32_t status, reg;
   8964 	bool link;
   8965 	int rv;
   8966 
   8967 	KASSERT(WM_CORE_LOCKED(sc));
   8968 
   8969 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   8970 		__func__));
   8971 
   8972 	if ((icr & ICR_LSC) == 0) {
   8973 		if (icr & ICR_RXSEQ)
   8974 			DPRINTF(WM_DEBUG_LINK,
   8975 			    ("%s: LINK Receive sequence error\n",
   8976 				device_xname(dev)));
   8977 		return;
   8978 	}
   8979 
   8980 	/* Link status changed */
   8981 	status = CSR_READ(sc, WMREG_STATUS);
   8982 	link = status & STATUS_LU;
   8983 	if (link) {
   8984 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8985 			device_xname(dev),
   8986 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8987 	} else {
   8988 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8989 			device_xname(dev)));
   8990 	}
   8991 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8992 		wm_gig_downshift_workaround_ich8lan(sc);
   8993 
   8994 	if ((sc->sc_type == WM_T_ICH8)
   8995 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8996 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8997 	}
   8998 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8999 		device_xname(dev)));
   9000 	mii_pollstat(&sc->sc_mii);
   9001 	if (sc->sc_type == WM_T_82543) {
   9002 		int miistatus, active;
   9003 
    9004 		/*
    9005 		 * With the 82543, we need to force the speed and
    9006 		 * duplex on the MAC to match the PHY's current
    9007 		 * speed and duplex configuration.
    9008 		 */
   9009 		miistatus = sc->sc_mii.mii_media_status;
   9010 
   9011 		if (miistatus & IFM_ACTIVE) {
   9012 			active = sc->sc_mii.mii_media_active;
   9013 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9014 			switch (IFM_SUBTYPE(active)) {
   9015 			case IFM_10_T:
   9016 				sc->sc_ctrl |= CTRL_SPEED_10;
   9017 				break;
   9018 			case IFM_100_TX:
   9019 				sc->sc_ctrl |= CTRL_SPEED_100;
   9020 				break;
   9021 			case IFM_1000_T:
   9022 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9023 				break;
   9024 			default:
   9025 				/*
   9026 				 * Fiber?
    9027 				 * Should not get here.
   9028 				 */
   9029 				printf("unknown media (%x)\n", active);
   9030 				break;
   9031 			}
   9032 			if (active & IFM_FDX)
   9033 				sc->sc_ctrl |= CTRL_FD;
   9034 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9035 		}
   9036 	} else if (sc->sc_type == WM_T_PCH) {
   9037 		wm_k1_gig_workaround_hv(sc,
   9038 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9039 	}
   9040 
   9041 	/*
   9042 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9043 	 * aggressive resulting in many collisions. To avoid this, increase
   9044 	 * the IPG and reduce Rx latency in the PHY.
   9045 	 */
   9046 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9047 	    && link) {
   9048 		uint32_t tipg_reg;
   9049 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9050 		bool fdx;
   9051 		uint16_t emi_addr, emi_val;
   9052 
   9053 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9054 		tipg_reg &= ~TIPG_IPGT_MASK;
   9055 		fdx = status & STATUS_FD;
   9056 
   9057 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9058 			tipg_reg |= 0xff;
   9059 			/* Reduce Rx latency in analog PHY */
   9060 			emi_val = 0;
   9061 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9062 		    fdx && speed != STATUS_SPEED_1000) {
   9063 			tipg_reg |= 0xc;
   9064 			emi_val = 1;
   9065 		} else {
   9066 			/* Roll back the default values */
   9067 			tipg_reg |= 0x08;
   9068 			emi_val = 1;
   9069 		}
   9070 
   9071 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9072 
   9073 		rv = sc->phy.acquire(sc);
   9074 		if (rv)
   9075 			return;
   9076 
   9077 		if (sc->sc_type == WM_T_PCH2)
   9078 			emi_addr = I82579_RX_CONFIG;
   9079 		else
   9080 			emi_addr = I217_RX_CONFIG;
   9081 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9082 
   9083 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9084 			uint16_t phy_reg;
   9085 
   9086 			sc->phy.readreg_locked(dev, 2,
   9087 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9088 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9089 			if (speed == STATUS_SPEED_100
   9090 			    || speed == STATUS_SPEED_10)
   9091 				phy_reg |= 0x3e8;
   9092 			else
   9093 				phy_reg |= 0xfa;
   9094 			sc->phy.writereg_locked(dev, 2,
   9095 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9096 
   9097 			if (speed == STATUS_SPEED_1000) {
   9098 				sc->phy.readreg_locked(dev, 2,
   9099 				    HV_PM_CTRL, &phy_reg);
   9100 
   9101 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9102 
   9103 				sc->phy.writereg_locked(dev, 2,
   9104 				    HV_PM_CTRL, phy_reg);
   9105 			}
   9106 		}
   9107 		sc->phy.release(sc);
   9108 
   9109 		if (rv)
   9110 			return;
   9111 
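         		/*
         		 * Adjust an undocumented I219 PHY register
         		 * (I219_UNKNOWN1) according to the link speed: at
         		 * 1000Mbps, make sure its gap field is at least 0x18;
         		 * at lower speeds, program a fixed value.
         		 */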
   9112 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9113 			uint16_t data, ptr_gap;
   9114 
   9115 			if (speed == STATUS_SPEED_1000) {
   9116 				rv = sc->phy.acquire(sc);
   9117 				if (rv)
   9118 					return;
   9119 
   9120 				rv = sc->phy.readreg_locked(dev, 2,
   9121 				    I219_UNKNOWN1, &data);
   9122 				if (rv) {
   9123 					sc->phy.release(sc);
   9124 					return;
   9125 				}
   9126 
   9127 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9128 				if (ptr_gap < 0x18) {
   9129 					data &= ~(0x3ff << 2);
   9130 					data |= (0x18 << 2);
   9131 					rv = sc->phy.writereg_locked(dev,
   9132 					    2, I219_UNKNOWN1, data);
   9133 				}
   9134 				sc->phy.release(sc);
   9135 				if (rv)
   9136 					return;
   9137 			} else {
   9138 				rv = sc->phy.acquire(sc);
   9139 				if (rv)
   9140 					return;
   9141 
   9142 				rv = sc->phy.writereg_locked(dev, 2,
   9143 				    I219_UNKNOWN1, 0xc023);
   9144 				sc->phy.release(sc);
   9145 				if (rv)
   9146 					return;
   9147 
   9148 			}
   9149 		}
   9150 	}
   9151 
    9152 	/*
    9153 	 * I217 packet loss issue:
    9154 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    9155 	 * on power up.
    9156 	 * Set the Beacon Duration for the I217 to 8 usec.
    9157 	 */
   9158 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9159 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9160 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9161 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9162 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9163 	}
   9164 
   9165 	/* Work-around I218 hang issue */
   9166 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9167 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9168 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9169 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9170 		wm_k1_workaround_lpt_lp(sc, link);
   9171 
   9172 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9173 		/*
   9174 		 * Set platform power management values for Latency
   9175 		 * Tolerance Reporting (LTR)
   9176 		 */
   9177 		wm_platform_pm_pch_lpt(sc,
   9178 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9179 	}
   9180 
   9181 	/* Clear link partner's EEE ability */
   9182 	sc->eee_lp_ability = 0;
   9183 
   9184 	/* FEXTNVM6 K1-off workaround */
   9185 	if (sc->sc_type == WM_T_PCH_SPT) {
   9186 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9187 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9188 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9189 		else
   9190 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9191 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9192 	}
   9193 
   9194 	if (!link)
   9195 		return;
   9196 
   9197 	switch (sc->sc_type) {
   9198 	case WM_T_PCH2:
   9199 		wm_k1_workaround_lv(sc);
   9200 		/* FALLTHROUGH */
   9201 	case WM_T_PCH:
   9202 		if (sc->sc_phytype == WMPHY_82578)
   9203 			wm_link_stall_workaround_hv(sc);
   9204 		break;
   9205 	default:
   9206 		break;
   9207 	}
   9208 
   9209 	/* Enable/Disable EEE after link up */
   9210 	if (sc->sc_phytype > WMPHY_82579)
   9211 		wm_set_eee_pchlan(sc);
   9212 }
   9213 
   9214 /*
   9215  * wm_linkintr_tbi:
   9216  *
   9217  *	Helper; handle link interrupts for TBI mode.
   9218  */
   9219 static void
   9220 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9221 {
   9222 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9223 	uint32_t status;
   9224 
   9225 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9226 		__func__));
   9227 
   9228 	status = CSR_READ(sc, WMREG_STATUS);
   9229 	if (icr & ICR_LSC) {
   9230 		wm_check_for_link(sc);
   9231 		if (status & STATUS_LU) {
   9232 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9233 				device_xname(sc->sc_dev),
   9234 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9235 			/*
   9236 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9237 			 * so we should update sc->sc_ctrl
   9238 			 */
   9239 
   9240 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9241 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9242 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9243 			if (status & STATUS_FD)
   9244 				sc->sc_tctl |=
   9245 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9246 			else
   9247 				sc->sc_tctl |=
   9248 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9249 			if (sc->sc_ctrl & CTRL_TFCE)
   9250 				sc->sc_fcrtl |= FCRTL_XONE;
   9251 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9252 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9253 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9254 			sc->sc_tbi_linkup = 1;
   9255 			if_link_state_change(ifp, LINK_STATE_UP);
   9256 		} else {
   9257 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9258 				device_xname(sc->sc_dev)));
   9259 			sc->sc_tbi_linkup = 0;
   9260 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9261 		}
   9262 		/* Update LED */
   9263 		wm_tbi_serdes_set_linkled(sc);
   9264 	} else if (icr & ICR_RXSEQ)
   9265 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9266 			device_xname(sc->sc_dev)));
   9267 }
   9268 
   9269 /*
   9270  * wm_linkintr_serdes:
   9271  *
    9272  *	Helper; handle link interrupts for SERDES mode.
   9273  */
   9274 static void
   9275 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9276 {
   9277 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9278 	struct mii_data *mii = &sc->sc_mii;
   9279 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9280 	uint32_t pcs_adv, pcs_lpab, reg;
   9281 
   9282 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9283 		__func__));
   9284 
   9285 	if (icr & ICR_LSC) {
   9286 		/* Check PCS */
   9287 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9288 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9289 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9290 				device_xname(sc->sc_dev)));
   9291 			mii->mii_media_status |= IFM_ACTIVE;
   9292 			sc->sc_tbi_linkup = 1;
   9293 			if_link_state_change(ifp, LINK_STATE_UP);
   9294 		} else {
   9295 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9296 				device_xname(sc->sc_dev)));
   9297 			mii->mii_media_status |= IFM_NONE;
   9298 			sc->sc_tbi_linkup = 0;
   9299 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9300 			wm_tbi_serdes_set_linkled(sc);
   9301 			return;
   9302 		}
   9303 		mii->mii_media_active |= IFM_1000_SX;
   9304 		if ((reg & PCS_LSTS_FDX) != 0)
   9305 			mii->mii_media_active |= IFM_FDX;
   9306 		else
   9307 			mii->mii_media_active |= IFM_HDX;
   9308 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9309 			/* Check flow */
   9310 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9311 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9312 				DPRINTF(WM_DEBUG_LINK,
   9313 				    ("XXX LINKOK but not ACOMP\n"));
   9314 				return;
   9315 			}
   9316 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9317 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9318 			DPRINTF(WM_DEBUG_LINK,
   9319 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9320 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9321 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9322 				mii->mii_media_active |= IFM_FLOW
   9323 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9324 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9325 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9326 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9327 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9328 				mii->mii_media_active |= IFM_FLOW
   9329 				    | IFM_ETH_TXPAUSE;
   9330 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9331 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9332 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9333 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9334 				mii->mii_media_active |= IFM_FLOW
   9335 				    | IFM_ETH_RXPAUSE;
   9336 		}
   9337 		/* Update LED */
   9338 		wm_tbi_serdes_set_linkled(sc);
   9339 	} else
   9340 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9341 		    device_xname(sc->sc_dev)));
   9342 }
   9343 
   9344 /*
   9345  * wm_linkintr:
   9346  *
   9347  *	Helper; handle link interrupts.
   9348  */
   9349 static void
   9350 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9351 {
   9352 
   9353 	KASSERT(WM_CORE_LOCKED(sc));
   9354 
   9355 	if (sc->sc_flags & WM_F_HAS_MII)
   9356 		wm_linkintr_gmii(sc, icr);
   9357 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9358 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9359 		wm_linkintr_serdes(sc, icr);
   9360 	else
   9361 		wm_linkintr_tbi(sc, icr);
   9362 }
   9363 
   9364 /*
   9365  * wm_intr_legacy:
   9366  *
   9367  *	Interrupt service routine for INTx and MSI.
   9368  */
   9369 static int
   9370 wm_intr_legacy(void *arg)
   9371 {
   9372 	struct wm_softc *sc = arg;
   9373 	struct wm_queue *wmq = &sc->sc_queue[0];
   9374 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9375 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9376 	uint32_t icr, rndval = 0;
   9377 	int handled = 0;
   9378 
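         	/*
         	 * Reading ICR acknowledges (clears) the asserted interrupt
         	 * causes, so keep looping until no cause we care about
         	 * remains pending.
         	 */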
   9379 	while (1 /* CONSTCOND */) {
   9380 		icr = CSR_READ(sc, WMREG_ICR);
   9381 		if ((icr & sc->sc_icr) == 0)
   9382 			break;
   9383 		if (handled == 0)
   9384 			DPRINTF(WM_DEBUG_TX,
    9385 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9386 		if (rndval == 0)
   9387 			rndval = icr;
   9388 
   9389 		mutex_enter(rxq->rxq_lock);
   9390 
   9391 		if (rxq->rxq_stopping) {
   9392 			mutex_exit(rxq->rxq_lock);
   9393 			break;
   9394 		}
   9395 
   9396 		handled = 1;
   9397 
   9398 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9399 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9400 			DPRINTF(WM_DEBUG_RX,
   9401 			    ("%s: RX: got Rx intr 0x%08x\n",
   9402 				device_xname(sc->sc_dev),
   9403 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9404 			WM_Q_EVCNT_INCR(rxq, intr);
   9405 		}
   9406 #endif
   9407 		/*
   9408 		 * wm_rxeof() does *not* call upper layer functions directly,
    9409 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9410 		 * So, we can call wm_rxeof() in interrupt context.
   9411 		 */
   9412 		wm_rxeof(rxq, UINT_MAX);
   9413 
   9414 		mutex_exit(rxq->rxq_lock);
   9415 		mutex_enter(txq->txq_lock);
   9416 
   9417 		if (txq->txq_stopping) {
   9418 			mutex_exit(txq->txq_lock);
   9419 			break;
   9420 		}
   9421 
   9422 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9423 		if (icr & ICR_TXDW) {
   9424 			DPRINTF(WM_DEBUG_TX,
   9425 			    ("%s: TX: got TXDW interrupt\n",
   9426 				device_xname(sc->sc_dev)));
   9427 			WM_Q_EVCNT_INCR(txq, txdw);
   9428 		}
   9429 #endif
   9430 		wm_txeof(txq, UINT_MAX);
   9431 
   9432 		mutex_exit(txq->txq_lock);
   9433 		WM_CORE_LOCK(sc);
   9434 
   9435 		if (sc->sc_core_stopping) {
   9436 			WM_CORE_UNLOCK(sc);
   9437 			break;
   9438 		}
   9439 
   9440 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9441 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9442 			wm_linkintr(sc, icr);
   9443 		}
   9444 
   9445 		WM_CORE_UNLOCK(sc);
   9446 
   9447 		if (icr & ICR_RXO) {
   9448 #if defined(WM_DEBUG)
   9449 			log(LOG_WARNING, "%s: Receive overrun\n",
   9450 			    device_xname(sc->sc_dev));
   9451 #endif /* defined(WM_DEBUG) */
   9452 		}
   9453 	}
   9454 
   9455 	rnd_add_uint32(&sc->rnd_source, rndval);
   9456 
   9457 	if (handled) {
   9458 		/* Try to get more packets going. */
   9459 		softint_schedule(wmq->wmq_si);
   9460 	}
   9461 
   9462 	return handled;
   9463 }
   9464 
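         /*
          * Mask this queue's Tx/Rx interrupt sources: via IMC on the 82574,
          * via the per-queue EIMC bits on the 82575, and via the vector's
          * EIMC bit on newer controllers.
          */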
   9465 static inline void
   9466 wm_txrxintr_disable(struct wm_queue *wmq)
   9467 {
   9468 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9469 
   9470 	if (sc->sc_type == WM_T_82574)
   9471 		CSR_WRITE(sc, WMREG_IMC,
   9472 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9473 	else if (sc->sc_type == WM_T_82575)
   9474 		CSR_WRITE(sc, WMREG_EIMC,
   9475 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9476 	else
   9477 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9478 }
   9479 
   9480 static inline void
   9481 wm_txrxintr_enable(struct wm_queue *wmq)
   9482 {
   9483 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9484 
   9485 	wm_itrs_calculate(sc, wmq);
   9486 
    9487 	/*
    9488 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9489 	 * here.  It does not matter whether RXQ(0) or RXQ(1) re-enables
    9490 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9491 	 * while its wm_handle_queue(wmq) is running.
    9492 	 */
   9493 	if (sc->sc_type == WM_T_82574)
   9494 		CSR_WRITE(sc, WMREG_IMS,
   9495 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9496 	else if (sc->sc_type == WM_T_82575)
   9497 		CSR_WRITE(sc, WMREG_EIMS,
   9498 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9499 	else
   9500 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9501 }
   9502 
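/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx queue pair of one MSI-X
 *	vector. The queue's interrupts stay masked until the remaining
 *	work is drained, either here or in the wm_handle_queue() softint.
 */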
   9503 static int
   9504 wm_txrxintr_msix(void *arg)
   9505 {
   9506 	struct wm_queue *wmq = arg;
   9507 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9508 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9509 	struct wm_softc *sc = txq->txq_sc;
   9510 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9511 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9512 	bool txmore;
   9513 	bool rxmore;
   9514 
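	/* Tx/Rx queue pairs get their MSI-X vectors in queue order. */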
   9515 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9516 
   9517 	DPRINTF(WM_DEBUG_TX,
   9518 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9519 
   9520 	wm_txrxintr_disable(wmq);
   9521 
   9522 	mutex_enter(txq->txq_lock);
   9523 
   9524 	if (txq->txq_stopping) {
   9525 		mutex_exit(txq->txq_lock);
   9526 		return 0;
   9527 	}
   9528 
   9529 	WM_Q_EVCNT_INCR(txq, txdw);
   9530 	txmore = wm_txeof(txq, txlimit);
	/* wm_deferred_start() is done in wm_handle_queue(). */
   9532 	mutex_exit(txq->txq_lock);
   9533 
   9534 	DPRINTF(WM_DEBUG_RX,
   9535 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9536 	mutex_enter(rxq->rxq_lock);
   9537 
   9538 	if (rxq->rxq_stopping) {
   9539 		mutex_exit(rxq->rxq_lock);
   9540 		return 0;
   9541 	}
   9542 
   9543 	WM_Q_EVCNT_INCR(rxq, intr);
   9544 	rxmore = wm_rxeof(rxq, rxlimit);
   9545 	mutex_exit(rxq->rxq_lock);
   9546 
   9547 	wm_itrs_writereg(sc, wmq);
   9548 
   9549 	if (txmore || rxmore)
   9550 		softint_schedule(wmq->wmq_si);
   9551 	else
   9552 		wm_txrxintr_enable(wmq);
   9553 
   9554 	return 1;
   9555 }
   9556 
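/*
 * wm_handle_queue:
 *
 *	Softint handler for deferred Tx/Rx processing. It continues where
 *	the interrupt handler left off, using the non-interrupt process
 *	limits, and re-enables the queue interrupts once no work is left.
 */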
   9557 static void
   9558 wm_handle_queue(void *arg)
   9559 {
   9560 	struct wm_queue *wmq = arg;
   9561 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9562 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9563 	struct wm_softc *sc = txq->txq_sc;
   9564 	u_int txlimit = sc->sc_tx_process_limit;
   9565 	u_int rxlimit = sc->sc_rx_process_limit;
   9566 	bool txmore;
   9567 	bool rxmore;
   9568 
   9569 	mutex_enter(txq->txq_lock);
   9570 	if (txq->txq_stopping) {
   9571 		mutex_exit(txq->txq_lock);
   9572 		return;
   9573 	}
   9574 	txmore = wm_txeof(txq, txlimit);
   9575 	wm_deferred_start_locked(txq);
   9576 	mutex_exit(txq->txq_lock);
   9577 
   9578 	mutex_enter(rxq->rxq_lock);
   9579 	if (rxq->rxq_stopping) {
   9580 		mutex_exit(rxq->rxq_lock);
   9581 		return;
   9582 	}
   9583 	WM_Q_EVCNT_INCR(rxq, defer);
   9584 	rxmore = wm_rxeof(rxq, rxlimit);
   9585 	mutex_exit(rxq->rxq_lock);
   9586 
   9587 	if (txmore || rxmore)
   9588 		softint_schedule(wmq->wmq_si);
   9589 	else
   9590 		wm_txrxintr_enable(wmq);
   9591 }
   9592 
   9593 /*
   9594  * wm_linkintr_msix:
   9595  *
   9596  *	Interrupt service routine for link status change for MSI-X.
   9597  */
   9598 static int
   9599 wm_linkintr_msix(void *arg)
   9600 {
   9601 	struct wm_softc *sc = arg;
   9602 	uint32_t reg;
	bool has_rxo = false;
   9604 
   9605 	DPRINTF(WM_DEBUG_LINK,
   9606 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9607 
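	/* Reading ICR acknowledges (clears) the asserted cause bits. */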
   9608 	reg = CSR_READ(sc, WMREG_ICR);
   9609 	WM_CORE_LOCK(sc);
   9610 	if (sc->sc_core_stopping)
   9611 		goto out;
   9612 
   9613 	if ((reg & ICR_LSC) != 0) {
   9614 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9615 		wm_linkintr(sc, ICR_LSC);
   9616 	}
   9617 
   9618 	/*
   9619 	 * XXX 82574 MSI-X mode workaround
   9620 	 *
   9621 	 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
   9622 	 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor
   9623 	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
   9624 	 * interrupts by writing WMREG_ICS to process receive packets.
   9625 	 */
   9626 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9627 #if defined(WM_DEBUG)
   9628 		log(LOG_WARNING, "%s: Receive overrun\n",
   9629 		    device_xname(sc->sc_dev));
   9630 #endif /* defined(WM_DEBUG) */
   9631 
   9632 		has_rxo = true;
   9633 		/*
   9634 		 * The RXO interrupt is very high rate when receive traffic is
   9635 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   9636 		 * interrupts. ICR_OTHER will be enabled at the end of
   9637 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   9638 		 * ICR_RXQ(1) interrupts.
   9639 		 */
   9640 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9641 
   9642 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9643 	}
   9644 
   9645 
   9646 
   9647 out:
   9648 	WM_CORE_UNLOCK(sc);
   9649 
   9650 	if (sc->sc_type == WM_T_82574) {
   9651 		if (!has_rxo)
   9652 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9653 		else
   9654 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9655 	} else if (sc->sc_type == WM_T_82575)
   9656 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9657 	else
   9658 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9659 
   9660 	return 1;
   9661 }
   9662 
   9663 /*
   9664  * Media related.
   9665  * GMII, SGMII, TBI (and SERDES)
   9666  */
   9667 
   9668 /* Common */
   9669 
   9670 /*
   9671  * wm_tbi_serdes_set_linkled:
   9672  *
   9673  *	Update the link LED on TBI and SERDES devices.
   9674  */
   9675 static void
   9676 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9677 {
   9678 
   9679 	if (sc->sc_tbi_linkup)
   9680 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9681 	else
   9682 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9683 
   9684 	/* 82540 or newer devices are active low */
   9685 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9686 
   9687 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9688 }
   9689 
   9690 /* GMII related */
   9691 
   9692 /*
   9693  * wm_gmii_reset:
   9694  *
   9695  *	Reset the PHY.
   9696  */
   9697 static void
   9698 wm_gmii_reset(struct wm_softc *sc)
   9699 {
   9700 	uint32_t reg;
   9701 	int rv;
   9702 
   9703 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9704 		device_xname(sc->sc_dev), __func__));
   9705 
   9706 	rv = sc->phy.acquire(sc);
   9707 	if (rv != 0) {
   9708 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9709 		    __func__);
   9710 		return;
   9711 	}
   9712 
   9713 	switch (sc->sc_type) {
   9714 	case WM_T_82542_2_0:
   9715 	case WM_T_82542_2_1:
   9716 		/* null */
   9717 		break;
   9718 	case WM_T_82543:
   9719 		/*
   9720 		 * With 82543, we need to force speed and duplex on the MAC
   9721 		 * equal to what the PHY speed and duplex configuration is.
   9722 		 * In addition, we need to perform a hardware reset on the PHY
   9723 		 * to take it out of reset.
   9724 		 */
   9725 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9726 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9727 
   9728 		/* The PHY reset pin is active-low. */
   9729 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9730 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9731 		    CTRL_EXT_SWDPIN(4));
   9732 		reg |= CTRL_EXT_SWDPIO(4);
   9733 
   9734 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9735 		CSR_WRITE_FLUSH(sc);
   9736 		delay(10*1000);
   9737 
   9738 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9739 		CSR_WRITE_FLUSH(sc);
   9740 		delay(150);
   9741 #if 0
   9742 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9743 #endif
   9744 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9745 		break;
   9746 	case WM_T_82544:	/* Reset 10000us */
   9747 	case WM_T_82540:
   9748 	case WM_T_82545:
   9749 	case WM_T_82545_3:
   9750 	case WM_T_82546:
   9751 	case WM_T_82546_3:
   9752 	case WM_T_82541:
   9753 	case WM_T_82541_2:
   9754 	case WM_T_82547:
   9755 	case WM_T_82547_2:
   9756 	case WM_T_82571:	/* Reset 100us */
   9757 	case WM_T_82572:
   9758 	case WM_T_82573:
   9759 	case WM_T_82574:
   9760 	case WM_T_82575:
   9761 	case WM_T_82576:
   9762 	case WM_T_82580:
   9763 	case WM_T_I350:
   9764 	case WM_T_I354:
   9765 	case WM_T_I210:
   9766 	case WM_T_I211:
   9767 	case WM_T_82583:
   9768 	case WM_T_80003:
   9769 		/* Generic reset */
   9770 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9771 		CSR_WRITE_FLUSH(sc);
   9772 		delay(20000);
   9773 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9774 		CSR_WRITE_FLUSH(sc);
   9775 		delay(20000);
   9776 
   9777 		if ((sc->sc_type == WM_T_82541)
   9778 		    || (sc->sc_type == WM_T_82541_2)
   9779 		    || (sc->sc_type == WM_T_82547)
   9780 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP PHYs are done in igp_reset() */
   9782 			/* XXX add code to set LED after phy reset */
   9783 		}
   9784 		break;
   9785 	case WM_T_ICH8:
   9786 	case WM_T_ICH9:
   9787 	case WM_T_ICH10:
   9788 	case WM_T_PCH:
   9789 	case WM_T_PCH2:
   9790 	case WM_T_PCH_LPT:
   9791 	case WM_T_PCH_SPT:
   9792 	case WM_T_PCH_CNP:
   9793 		/* Generic reset */
   9794 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9795 		CSR_WRITE_FLUSH(sc);
   9796 		delay(100);
   9797 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9798 		CSR_WRITE_FLUSH(sc);
   9799 		delay(150);
   9800 		break;
   9801 	default:
   9802 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9803 		    __func__);
   9804 		break;
   9805 	}
   9806 
   9807 	sc->phy.release(sc);
   9808 
   9809 	/* get_cfg_done */
   9810 	wm_get_cfg_done(sc);
   9811 
   9812 	/* Extra setup */
   9813 	switch (sc->sc_type) {
   9814 	case WM_T_82542_2_0:
   9815 	case WM_T_82542_2_1:
   9816 	case WM_T_82543:
   9817 	case WM_T_82544:
   9818 	case WM_T_82540:
   9819 	case WM_T_82545:
   9820 	case WM_T_82545_3:
   9821 	case WM_T_82546:
   9822 	case WM_T_82546_3:
   9823 	case WM_T_82541_2:
   9824 	case WM_T_82547_2:
   9825 	case WM_T_82571:
   9826 	case WM_T_82572:
   9827 	case WM_T_82573:
   9828 	case WM_T_82574:
   9829 	case WM_T_82583:
   9830 	case WM_T_82575:
   9831 	case WM_T_82576:
   9832 	case WM_T_82580:
   9833 	case WM_T_I350:
   9834 	case WM_T_I354:
   9835 	case WM_T_I210:
   9836 	case WM_T_I211:
   9837 	case WM_T_80003:
   9838 		/* Null */
   9839 		break;
   9840 	case WM_T_82541:
   9841 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   9843 		break;
   9844 	case WM_T_ICH8:
   9845 	case WM_T_ICH9:
   9846 	case WM_T_ICH10:
   9847 	case WM_T_PCH:
   9848 	case WM_T_PCH2:
   9849 	case WM_T_PCH_LPT:
   9850 	case WM_T_PCH_SPT:
   9851 	case WM_T_PCH_CNP:
   9852 		wm_phy_post_reset(sc);
   9853 		break;
   9854 	default:
   9855 		panic("%s: unknown type\n", __func__);
   9856 		break;
   9857 	}
   9858 }
   9859 
   9860 /*
   9861  * Setup sc_phytype and mii_{read|write}reg.
   9862  *
   9863  *  To identify PHY type, correct read/write function should be selected.
   9864  * To select correct read/write function, PCI ID or MAC type are required
   9865  * without accessing PHY registers.
   9866  *
   9867  *  On the first call of this function, PHY ID is not known yet. Check
   9868  * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
   9869  * result might be incorrect.
   9870  *
   9871  *  In the second call, PHY OUI and model is used to identify PHY type.
   9872  * It might not be perfpect because of the lack of compared entry, but it
   9873  * would be better than the first call.
   9874  *
   9875  *  If the detected new result and previous assumption is different,
   9876  * diagnous message will be printed.
   9877  */
   9878 static void
   9879 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9880     uint16_t phy_model)
   9881 {
   9882 	device_t dev = sc->sc_dev;
   9883 	struct mii_data *mii = &sc->sc_mii;
   9884 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9885 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9886 	mii_readreg_t new_readreg;
   9887 	mii_writereg_t new_writereg;
   9888 
   9889 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9890 		device_xname(sc->sc_dev), __func__));
   9891 
   9892 	if (mii->mii_readreg == NULL) {
   9893 		/*
   9894 		 *  This is the first call of this function. For ICH and PCH
   9895 		 * variants, it's difficult to determine the PHY access method
   9896 		 * by sc_type, so use the PCI product ID for some devices.
   9897 		 */
   9898 
   9899 		switch (sc->sc_pcidevid) {
   9900 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9901 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9902 			/* 82577 */
   9903 			new_phytype = WMPHY_82577;
   9904 			break;
   9905 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9906 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9907 			/* 82578 */
   9908 			new_phytype = WMPHY_82578;
   9909 			break;
   9910 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9911 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9912 			/* 82579 */
   9913 			new_phytype = WMPHY_82579;
   9914 			break;
   9915 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9916 		case PCI_PRODUCT_INTEL_82801I_BM:
   9917 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9918 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9919 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9920 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9921 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9922 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9923 			/* ICH8, 9, 10 with 82567 */
   9924 			new_phytype = WMPHY_BM;
   9925 			break;
   9926 		default:
   9927 			break;
   9928 		}
   9929 	} else {
   9930 		/* It's not the first call. Use PHY OUI and model */
   9931 		switch (phy_oui) {
   9932 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9933 			switch (phy_model) {
   9934 			case 0x0004: /* XXX */
   9935 				new_phytype = WMPHY_82578;
   9936 				break;
   9937 			default:
   9938 				break;
   9939 			}
   9940 			break;
   9941 		case MII_OUI_xxMARVELL:
   9942 			switch (phy_model) {
   9943 			case MII_MODEL_xxMARVELL_I210:
   9944 				new_phytype = WMPHY_I210;
   9945 				break;
   9946 			case MII_MODEL_xxMARVELL_E1011:
   9947 			case MII_MODEL_xxMARVELL_E1000_3:
   9948 			case MII_MODEL_xxMARVELL_E1000_5:
   9949 			case MII_MODEL_xxMARVELL_E1112:
   9950 				new_phytype = WMPHY_M88;
   9951 				break;
   9952 			case MII_MODEL_xxMARVELL_E1149:
   9953 				new_phytype = WMPHY_BM;
   9954 				break;
   9955 			case MII_MODEL_xxMARVELL_E1111:
   9956 			case MII_MODEL_xxMARVELL_I347:
   9957 			case MII_MODEL_xxMARVELL_E1512:
   9958 			case MII_MODEL_xxMARVELL_E1340M:
   9959 			case MII_MODEL_xxMARVELL_E1543:
   9960 				new_phytype = WMPHY_M88;
   9961 				break;
   9962 			case MII_MODEL_xxMARVELL_I82563:
   9963 				new_phytype = WMPHY_GG82563;
   9964 				break;
   9965 			default:
   9966 				break;
   9967 			}
   9968 			break;
   9969 		case MII_OUI_INTEL:
   9970 			switch (phy_model) {
   9971 			case MII_MODEL_INTEL_I82577:
   9972 				new_phytype = WMPHY_82577;
   9973 				break;
   9974 			case MII_MODEL_INTEL_I82579:
   9975 				new_phytype = WMPHY_82579;
   9976 				break;
   9977 			case MII_MODEL_INTEL_I217:
   9978 				new_phytype = WMPHY_I217;
   9979 				break;
   9980 			case MII_MODEL_INTEL_I82580:
   9981 			case MII_MODEL_INTEL_I350:
   9982 				new_phytype = WMPHY_82580;
   9983 				break;
   9984 			default:
   9985 				break;
   9986 			}
   9987 			break;
   9988 		case MII_OUI_yyINTEL:
   9989 			switch (phy_model) {
   9990 			case MII_MODEL_yyINTEL_I82562G:
   9991 			case MII_MODEL_yyINTEL_I82562EM:
   9992 			case MII_MODEL_yyINTEL_I82562ET:
   9993 				new_phytype = WMPHY_IFE;
   9994 				break;
   9995 			case MII_MODEL_yyINTEL_IGP01E1000:
   9996 				new_phytype = WMPHY_IGP;
   9997 				break;
   9998 			case MII_MODEL_yyINTEL_I82566:
   9999 				new_phytype = WMPHY_IGP_3;
   10000 				break;
   10001 			default:
   10002 				break;
   10003 			}
   10004 			break;
   10005 		default:
   10006 			break;
   10007 		}
   10008 		if (new_phytype == WMPHY_UNKNOWN)
   10009 			aprint_verbose_dev(dev,
   10010 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10011 			    __func__, phy_oui, phy_model);
   10012 
		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY "
			    "type (%u) was incorrect. PHY type from PHY "
			    "ID = %u\n", sc->sc_phytype, new_phytype);
		}
   10019 	}
   10020 
   10021 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10022 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10023 		/* SGMII */
   10024 		new_readreg = wm_sgmii_readreg;
   10025 		new_writereg = wm_sgmii_writereg;
   10026 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10027 		/* BM2 (phyaddr == 1) */
   10028 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10029 		    && (new_phytype != WMPHY_BM)
   10030 		    && (new_phytype != WMPHY_UNKNOWN))
   10031 			doubt_phytype = new_phytype;
   10032 		new_phytype = WMPHY_BM;
   10033 		new_readreg = wm_gmii_bm_readreg;
   10034 		new_writereg = wm_gmii_bm_writereg;
   10035 	} else if (sc->sc_type >= WM_T_PCH) {
   10036 		/* All PCH* use _hv_ */
   10037 		new_readreg = wm_gmii_hv_readreg;
   10038 		new_writereg = wm_gmii_hv_writereg;
   10039 	} else if (sc->sc_type >= WM_T_ICH8) {
   10040 		/* non-82567 ICH8, 9 and 10 */
   10041 		new_readreg = wm_gmii_i82544_readreg;
   10042 		new_writereg = wm_gmii_i82544_writereg;
   10043 	} else if (sc->sc_type >= WM_T_80003) {
   10044 		/* 80003 */
   10045 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10046 		    && (new_phytype != WMPHY_GG82563)
   10047 		    && (new_phytype != WMPHY_UNKNOWN))
   10048 			doubt_phytype = new_phytype;
   10049 		new_phytype = WMPHY_GG82563;
   10050 		new_readreg = wm_gmii_i80003_readreg;
   10051 		new_writereg = wm_gmii_i80003_writereg;
   10052 	} else if (sc->sc_type >= WM_T_I210) {
   10053 		/* I210 and I211 */
   10054 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10055 		    && (new_phytype != WMPHY_I210)
   10056 		    && (new_phytype != WMPHY_UNKNOWN))
   10057 			doubt_phytype = new_phytype;
   10058 		new_phytype = WMPHY_I210;
   10059 		new_readreg = wm_gmii_gs40g_readreg;
   10060 		new_writereg = wm_gmii_gs40g_writereg;
   10061 	} else if (sc->sc_type >= WM_T_82580) {
   10062 		/* 82580, I350 and I354 */
   10063 		new_readreg = wm_gmii_82580_readreg;
   10064 		new_writereg = wm_gmii_82580_writereg;
   10065 	} else if (sc->sc_type >= WM_T_82544) {
   10066 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   10067 		new_readreg = wm_gmii_i82544_readreg;
   10068 		new_writereg = wm_gmii_i82544_writereg;
   10069 	} else {
   10070 		new_readreg = wm_gmii_i82543_readreg;
   10071 		new_writereg = wm_gmii_i82543_writereg;
   10072 	}
   10073 
   10074 	if (new_phytype == WMPHY_BM) {
   10075 		/* All BM use _bm_ */
   10076 		new_readreg = wm_gmii_bm_readreg;
   10077 		new_writereg = wm_gmii_bm_writereg;
   10078 	}
   10079 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10080 		/* All PCH* use _hv_ */
   10081 		new_readreg = wm_gmii_hv_readreg;
   10082 		new_writereg = wm_gmii_hv_writereg;
   10083 	}
   10084 
   10085 	/* Diag output */
   10086 	if (doubt_phytype != WMPHY_UNKNOWN)
   10087 		aprint_error_dev(dev, "Assumed new PHY type was "
   10088 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10089 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
		    "was incorrect. New PHY type = %u\n",
		    sc->sc_phytype, new_phytype);
   10095 
   10096 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10097 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10098 
   10099 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10100 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10101 		    "function was incorrect.\n");
   10102 
   10103 	/* Update now */
   10104 	sc->sc_phytype = new_phytype;
   10105 	mii->mii_readreg = new_readreg;
   10106 	mii->mii_writereg = new_writereg;
   10107 	if (new_readreg == wm_gmii_hv_readreg) {
   10108 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10109 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10110 	} else if (new_readreg == wm_sgmii_readreg) {
   10111 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10112 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10113 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10114 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10115 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10116 	}
   10117 }
   10118 
   10119 /*
   10120  * wm_get_phy_id_82575:
   10121  *
   10122  * Return PHY ID. Return -1 if it failed.
   10123  */
   10124 static int
   10125 wm_get_phy_id_82575(struct wm_softc *sc)
   10126 {
   10127 	uint32_t reg;
   10128 	int phyid = -1;
   10129 
   10130 	/* XXX */
   10131 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10132 		return -1;
   10133 
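	/*
	 * When SGMII goes through the internal MDIO interface, the
	 * configured PHY address can be read back from MDIC (82575/82576)
	 * or from MDICNFG (82580 and newer).
	 */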
   10134 	if (wm_sgmii_uses_mdio(sc)) {
   10135 		switch (sc->sc_type) {
   10136 		case WM_T_82575:
   10137 		case WM_T_82576:
   10138 			reg = CSR_READ(sc, WMREG_MDIC);
   10139 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10140 			break;
   10141 		case WM_T_82580:
   10142 		case WM_T_I350:
   10143 		case WM_T_I354:
   10144 		case WM_T_I210:
   10145 		case WM_T_I211:
   10146 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10147 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10148 			break;
   10149 		default:
   10150 			return -1;
   10151 		}
   10152 	}
   10153 
   10154 	return phyid;
   10155 }
   10156 
   10157 
   10158 /*
   10159  * wm_gmii_mediainit:
   10160  *
   10161  *	Initialize media for use on 1000BASE-T devices.
   10162  */
   10163 static void
   10164 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10165 {
   10166 	device_t dev = sc->sc_dev;
   10167 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10168 	struct mii_data *mii = &sc->sc_mii;
   10169 	uint32_t reg;
   10170 
   10171 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10172 		device_xname(sc->sc_dev), __func__));
   10173 
   10174 	/* We have GMII. */
   10175 	sc->sc_flags |= WM_F_HAS_MII;
   10176 
   10177 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10179 	else
   10180 		sc->sc_tipg = TIPG_1000T_DFLT;
   10181 
   10182 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10183 	if ((sc->sc_type == WM_T_82580)
   10184 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10185 	    || (sc->sc_type == WM_T_I211)) {
   10186 		reg = CSR_READ(sc, WMREG_PHPM);
   10187 		reg &= ~PHPM_GO_LINK_D;
   10188 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10189 	}
   10190 
   10191 	/*
   10192 	 * Let the chip set speed/duplex on its own based on
   10193 	 * signals from the PHY.
   10194 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10195 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10196 	 */
   10197 	sc->sc_ctrl |= CTRL_SLU;
   10198 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10199 
   10200 	/* Initialize our media structures and probe the GMII. */
   10201 	mii->mii_ifp = ifp;
   10202 
   10203 	mii->mii_statchg = wm_gmii_statchg;
   10204 
	/* Get PHY control from SMBus to PCIe */
   10206 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10207 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10208 	    || (sc->sc_type == WM_T_PCH_CNP))
   10209 		wm_init_phy_workarounds_pchlan(sc);
   10210 
   10211 	wm_gmii_reset(sc);
   10212 
   10213 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10214 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10215 	    wm_gmii_mediastatus);
   10216 
   10217 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10218 	    || (sc->sc_type == WM_T_82580)
   10219 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10220 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10221 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10222 			/* Attach only one port */
   10223 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10224 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10225 		} else {
   10226 			int i, id;
   10227 			uint32_t ctrl_ext;
   10228 
   10229 			id = wm_get_phy_id_82575(sc);
   10230 			if (id != -1) {
   10231 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10232 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10233 			}
   10234 			if ((id == -1)
   10235 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10236 				/* Power on sgmii phy if it is disabled */
   10237 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10238 				CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   10240 				CSR_WRITE_FLUSH(sc);
   10241 				delay(300*1000); /* XXX too long */
   10242 
				/* Try PHY addresses 1 through 7 */
   10244 				for (i = 1; i < 8; i++)
   10245 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10246 					    0xffffffff, i, MII_OFFSET_ANY,
   10247 					    MIIF_DOPAUSE);
   10248 
   10249 				/* Restore previous sfp cage power state */
   10250 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10251 			}
   10252 		}
   10253 	} else
   10254 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10255 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10256 
   10257 	/*
   10258 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10259 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10260 	 */
   10261 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10262 		|| (sc->sc_type == WM_T_PCH_SPT)
   10263 		|| (sc->sc_type == WM_T_PCH_CNP))
   10264 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10265 		wm_set_mdio_slow_mode_hv(sc);
   10266 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10267 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10268 	}
   10269 
   10270 	/*
   10271 	 * (For ICH8 variants)
   10272 	 * If PHY detection failed, use BM's r/w function and retry.
   10273 	 */
   10274 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10275 		/* if failed, retry with *_bm_* */
   10276 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10277 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10278 		    sc->sc_phytype);
   10279 		sc->sc_phytype = WMPHY_BM;
   10280 		mii->mii_readreg = wm_gmii_bm_readreg;
   10281 		mii->mii_writereg = wm_gmii_bm_writereg;
   10282 
   10283 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10284 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10285 	}
   10286 
   10287 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   10289 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10290 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10291 		sc->sc_phytype = WMPHY_NONE;
   10292 	} else {
   10293 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10294 
   10295 		/*
   10296 		 * PHY Found! Check PHY type again by the second call of
   10297 		 * wm_gmii_setup_phytype.
   10298 		 */
   10299 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10300 		    child->mii_mpd_model);
   10301 
   10302 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10303 	}
   10304 }
   10305 
   10306 /*
   10307  * wm_gmii_mediachange:	[ifmedia interface function]
   10308  *
   10309  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10310  */
   10311 static int
   10312 wm_gmii_mediachange(struct ifnet *ifp)
   10313 {
   10314 	struct wm_softc *sc = ifp->if_softc;
   10315 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10316 	int rc;
   10317 
   10318 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10319 		device_xname(sc->sc_dev), __func__));
   10320 	if ((ifp->if_flags & IFF_UP) == 0)
   10321 		return 0;
   10322 
   10323 	/* Disable D0 LPLU. */
   10324 	wm_lplu_d0_disable(sc);
   10325 
   10326 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10327 	sc->sc_ctrl |= CTRL_SLU;
   10328 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10329 	    || (sc->sc_type > WM_T_82543)) {
   10330 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10331 	} else {
   10332 		sc->sc_ctrl &= ~CTRL_ASDE;
   10333 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10334 		if (ife->ifm_media & IFM_FDX)
   10335 			sc->sc_ctrl |= CTRL_FD;
   10336 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10337 		case IFM_10_T:
   10338 			sc->sc_ctrl |= CTRL_SPEED_10;
   10339 			break;
   10340 		case IFM_100_TX:
   10341 			sc->sc_ctrl |= CTRL_SPEED_100;
   10342 			break;
   10343 		case IFM_1000_T:
   10344 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10345 			break;
   10346 		case IFM_NONE:
   10347 			/* There is no specific setting for IFM_NONE */
   10348 			break;
   10349 		default:
   10350 			panic("wm_gmii_mediachange: bad media 0x%x",
   10351 			    ife->ifm_media);
   10352 		}
   10353 	}
   10354 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10355 	CSR_WRITE_FLUSH(sc);
   10356 	if (sc->sc_type <= WM_T_82543)
   10357 		wm_gmii_reset(sc);
   10358 
   10359 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10360 		return 0;
   10361 	return rc;
   10362 }
   10363 
   10364 /*
   10365  * wm_gmii_mediastatus:	[ifmedia interface function]
   10366  *
   10367  *	Get the current interface media status on a 1000BASE-T device.
   10368  */
   10369 static void
   10370 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10371 {
   10372 	struct wm_softc *sc = ifp->if_softc;
   10373 
   10374 	ether_mediastatus(ifp, ifmr);
   10375 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10376 	    | sc->sc_flowflags;
   10377 }
   10378 
   10379 #define	MDI_IO		CTRL_SWDPIN(2)
   10380 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10381 #define	MDI_CLK		CTRL_SWDPIN(3)
   10382 
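/*
 * On the 82543 the MII management interface is bit-banged through
 * software-controllable pins: CTRL_SWDPIN(2) carries the MDIO data,
 * CTRL_SWDPIO(2) sets its direction and CTRL_SWDPIN(3) is the clock.
 */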
   10383 static void
   10384 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10385 {
   10386 	uint32_t i, v;
   10387 
   10388 	v = CSR_READ(sc, WMREG_CTRL);
   10389 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10390 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10391 
   10392 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10393 		if (data & i)
   10394 			v |= MDI_IO;
   10395 		else
   10396 			v &= ~MDI_IO;
   10397 		CSR_WRITE(sc, WMREG_CTRL, v);
   10398 		CSR_WRITE_FLUSH(sc);
   10399 		delay(10);
   10400 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10401 		CSR_WRITE_FLUSH(sc);
   10402 		delay(10);
   10403 		CSR_WRITE(sc, WMREG_CTRL, v);
   10404 		CSR_WRITE_FLUSH(sc);
   10405 		delay(10);
   10406 	}
   10407 }
   10408 
   10409 static uint16_t
   10410 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10411 {
   10412 	uint32_t v, i;
   10413 	uint16_t data = 0;
   10414 
   10415 	v = CSR_READ(sc, WMREG_CTRL);
   10416 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10417 	v |= CTRL_SWDPIO(3);
   10418 
   10419 	CSR_WRITE(sc, WMREG_CTRL, v);
   10420 	CSR_WRITE_FLUSH(sc);
   10421 	delay(10);
   10422 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10423 	CSR_WRITE_FLUSH(sc);
   10424 	delay(10);
   10425 	CSR_WRITE(sc, WMREG_CTRL, v);
   10426 	CSR_WRITE_FLUSH(sc);
   10427 	delay(10);
   10428 
   10429 	for (i = 0; i < 16; i++) {
   10430 		data <<= 1;
   10431 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10432 		CSR_WRITE_FLUSH(sc);
   10433 		delay(10);
   10434 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10435 			data |= 1;
   10436 		CSR_WRITE(sc, WMREG_CTRL, v);
   10437 		CSR_WRITE_FLUSH(sc);
   10438 		delay(10);
   10439 	}
   10440 
   10441 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10442 	CSR_WRITE_FLUSH(sc);
   10443 	delay(10);
   10444 	CSR_WRITE(sc, WMREG_CTRL, v);
   10445 	CSR_WRITE_FLUSH(sc);
   10446 	delay(10);
   10447 
   10448 	return data;
   10449 }
   10450 
   10451 #undef MDI_IO
   10452 #undef MDI_DIR
   10453 #undef MDI_CLK
   10454 
   10455 /*
   10456  * wm_gmii_i82543_readreg:	[mii interface function]
   10457  *
   10458  *	Read a PHY register on the GMII (i82543 version).
   10459  */
   10460 static int
   10461 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10462 {
   10463 	struct wm_softc *sc = device_private(dev);
   10464 
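	/*
	 * Clause 22 read frame, sent MSB first after a 32-bit preamble
	 * of ones: ST(01) OP(10=read) PHYAD(5) REGAD(5) = 14 bits; the
	 * turnaround and the 16 data bits are then clocked in by
	 * wm_i82543_mii_recvbits().
	 */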
   10465 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10466 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10467 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10468 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10469 
   10470 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10471 		device_xname(dev), phy, reg, *val));
   10472 
   10473 	return 0;
   10474 }
   10475 
   10476 /*
   10477  * wm_gmii_i82543_writereg:	[mii interface function]
   10478  *
   10479  *	Write a PHY register on the GMII (i82543 version).
   10480  */
   10481 static int
   10482 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10483 {
   10484 	struct wm_softc *sc = device_private(dev);
   10485 
   10486 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
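	/*
	 * Clause 22 write frame, sent MSB first after a 32-bit preamble
	 * of ones: ST(01) OP(01=write) PHYAD(5) REGAD(5) TA(10) DATA(16),
	 * 32 bits in total.
	 */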
   10487 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10488 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10489 	    (MII_COMMAND_START << 30), 32);
   10490 
   10491 	return 0;
   10492 }
   10493 
   10494 /*
   10495  * wm_gmii_mdic_readreg:	[mii interface function]
   10496  *
   10497  *	Read a PHY register on the GMII.
   10498  */
   10499 static int
   10500 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10501 {
   10502 	struct wm_softc *sc = device_private(dev);
   10503 	uint32_t mdic = 0;
   10504 	int i;
   10505 
   10506 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10507 	    && (reg > MII_ADDRMASK)) {
   10508 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10509 		    __func__, sc->sc_phytype, reg);
   10510 		reg &= MII_ADDRMASK;
   10511 	}
   10512 
   10513 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10514 	    MDIC_REGADD(reg));
   10515 
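	/* Poll the ready bit for up to WM_GEN_POLL_TIMEOUT * 3 * 50us. */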
   10516 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10517 		delay(50);
   10518 		mdic = CSR_READ(sc, WMREG_MDIC);
   10519 		if (mdic & MDIC_READY)
   10520 			break;
   10521 	}
   10522 
   10523 	if ((mdic & MDIC_READY) == 0) {
   10524 		DPRINTF(WM_DEBUG_GMII,
   10525 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10526 			device_xname(dev), phy, reg));
   10527 		return ETIMEDOUT;
   10528 	} else if (mdic & MDIC_E) {
   10529 		/* This is normal if no PHY is present. */
   10530 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10531 			device_xname(sc->sc_dev), phy, reg));
   10532 		return -1;
   10533 	} else
   10534 		*val = MDIC_DATA(mdic);
   10535 
   10536 	/*
   10537 	 * Allow some time after each MDIC transaction to avoid
   10538 	 * reading duplicate data in the next MDIC transaction.
   10539 	 */
   10540 	if (sc->sc_type == WM_T_PCH2)
   10541 		delay(100);
   10542 
   10543 	return 0;
   10544 }
   10545 
   10546 /*
   10547  * wm_gmii_mdic_writereg:	[mii interface function]
   10548  *
   10549  *	Write a PHY register on the GMII.
   10550  */
   10551 static int
   10552 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10553 {
   10554 	struct wm_softc *sc = device_private(dev);
   10555 	uint32_t mdic = 0;
   10556 	int i;
   10557 
   10558 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10559 	    && (reg > MII_ADDRMASK)) {
   10560 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10561 		    __func__, sc->sc_phytype, reg);
   10562 		reg &= MII_ADDRMASK;
   10563 	}
   10564 
   10565 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10566 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10567 
   10568 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10569 		delay(50);
   10570 		mdic = CSR_READ(sc, WMREG_MDIC);
   10571 		if (mdic & MDIC_READY)
   10572 			break;
   10573 	}
   10574 
   10575 	if ((mdic & MDIC_READY) == 0) {
   10576 		DPRINTF(WM_DEBUG_GMII,
   10577 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10578 			device_xname(dev), phy, reg));
   10579 		return ETIMEDOUT;
   10580 	} else if (mdic & MDIC_E) {
   10581 		DPRINTF(WM_DEBUG_GMII,
   10582 		    ("%s: MDIC write error: phy %d reg %d\n",
   10583 			device_xname(dev), phy, reg));
   10584 		return -1;
   10585 	}
   10586 
   10587 	/*
   10588 	 * Allow some time after each MDIC transaction to avoid
   10589 	 * reading duplicate data in the next MDIC transaction.
   10590 	 */
   10591 	if (sc->sc_type == WM_T_PCH2)
   10592 		delay(100);
   10593 
   10594 	return 0;
   10595 }
   10596 
   10597 /*
   10598  * wm_gmii_i82544_readreg:	[mii interface function]
   10599  *
   10600  *	Read a PHY register on the GMII.
   10601  */
   10602 static int
   10603 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10604 {
   10605 	struct wm_softc *sc = device_private(dev);
   10606 	int rv;
   10607 
   10608 	if (sc->phy.acquire(sc)) {
   10609 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10610 		return -1;
   10611 	}
   10612 
   10613 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10614 
   10615 	sc->phy.release(sc);
   10616 
   10617 	return rv;
   10618 }
   10619 
   10620 static int
   10621 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10622 {
   10623 	struct wm_softc *sc = device_private(dev);
   10624 	int rv;
   10625 
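	/*
	 * Registers above BME1000_MAX_MULTI_PAGE_REG are reached by first
	 * writing the full register number to the IGP page select
	 * register; only the IGP-family PHYs handle that here.
	 */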
   10626 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10627 		switch (sc->sc_phytype) {
   10628 		case WMPHY_IGP:
   10629 		case WMPHY_IGP_2:
   10630 		case WMPHY_IGP_3:
   10631 			rv = wm_gmii_mdic_writereg(dev, phy,
   10632 			    MII_IGPHY_PAGE_SELECT, reg);
   10633 			if (rv != 0)
   10634 				return rv;
   10635 			break;
   10636 		default:
   10637 #ifdef WM_DEBUG
   10638 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10639 			    __func__, sc->sc_phytype, reg);
   10640 #endif
   10641 			break;
   10642 		}
   10643 	}
   10644 
   10645 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10646 }
   10647 
   10648 /*
   10649  * wm_gmii_i82544_writereg:	[mii interface function]
   10650  *
   10651  *	Write a PHY register on the GMII.
   10652  */
   10653 static int
   10654 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10655 {
   10656 	struct wm_softc *sc = device_private(dev);
   10657 	int rv;
   10658 
   10659 	if (sc->phy.acquire(sc)) {
   10660 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10661 		return -1;
   10662 	}
   10663 
	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg, val);
   10665 	sc->phy.release(sc);
   10666 
   10667 	return rv;
   10668 }
   10669 
   10670 static int
   10671 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10672 {
   10673 	struct wm_softc *sc = device_private(dev);
   10674 	int rv;
   10675 
   10676 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10677 		switch (sc->sc_phytype) {
   10678 		case WMPHY_IGP:
   10679 		case WMPHY_IGP_2:
   10680 		case WMPHY_IGP_3:
   10681 			rv = wm_gmii_mdic_writereg(dev, phy,
   10682 			    MII_IGPHY_PAGE_SELECT, reg);
   10683 			if (rv != 0)
   10684 				return rv;
   10685 			break;
   10686 		default:
   10687 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
			    __func__, sc->sc_phytype, reg);
   10690 #endif
   10691 			break;
   10692 		}
   10693 	}
   10694 
   10695 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10696 }
   10697 
   10698 /*
   10699  * wm_gmii_i80003_readreg:	[mii interface function]
   10700  *
 *	Read a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10704  */
   10705 static int
   10706 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10707 {
   10708 	struct wm_softc *sc = device_private(dev);
   10709 	int page_select;
   10710 	uint16_t temp, temp2;
   10711 	int rv = 0;
   10712 
   10713 	if (phy != 1) /* Only one PHY on kumeran bus */
   10714 		return -1;
   10715 
   10716 	if (sc->phy.acquire(sc)) {
   10717 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10718 		return -1;
   10719 	}
   10720 
   10721 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10722 		page_select = GG82563_PHY_PAGE_SELECT;
   10723 	else {
   10724 		/*
   10725 		 * Use Alternative Page Select register to access registers
   10726 		 * 30 and 31.
   10727 		 */
   10728 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10729 	}
   10730 	temp = reg >> GG82563_PAGE_SHIFT;
   10731 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10732 		goto out;
   10733 
   10734 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10735 		/*
		 * Wait another 200us to work around a bug with the ready
		 * bit in the MDIC register.
   10738 		 */
   10739 		delay(200);
   10740 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10741 		if ((rv != 0) || (temp2 != temp)) {
   10742 			device_printf(dev, "%s failed\n", __func__);
   10743 			rv = -1;
   10744 			goto out;
   10745 		}
   10746 		delay(200);
   10747 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10748 		delay(200);
   10749 	} else
   10750 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10751 
   10752 out:
   10753 	sc->phy.release(sc);
   10754 	return rv;
   10755 }
   10756 
   10757 /*
   10758  * wm_gmii_i80003_writereg:	[mii interface function]
   10759  *
 *	Write a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10763  */
   10764 static int
   10765 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10766 {
   10767 	struct wm_softc *sc = device_private(dev);
   10768 	int page_select, rv;
   10769 	uint16_t temp, temp2;
   10770 
   10771 	if (phy != 1) /* Only one PHY on kumeran bus */
   10772 		return -1;
   10773 
   10774 	if (sc->phy.acquire(sc)) {
   10775 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10776 		return -1;
   10777 	}
   10778 
   10779 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10780 		page_select = GG82563_PHY_PAGE_SELECT;
   10781 	else {
   10782 		/*
   10783 		 * Use Alternative Page Select register to access registers
   10784 		 * 30 and 31.
   10785 		 */
   10786 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10787 	}
   10788 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10789 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10790 		goto out;
   10791 
   10792 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10793 		/*
		 * Wait another 200us to work around a bug with the ready
		 * bit in the MDIC register.
   10796 		 */
   10797 		delay(200);
   10798 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10799 		if ((rv != 0) || (temp2 != temp)) {
   10800 			device_printf(dev, "%s failed\n", __func__);
   10801 			rv = -1;
   10802 			goto out;
   10803 		}
   10804 		delay(200);
   10805 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10806 		delay(200);
   10807 	} else
   10808 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10809 
   10810 out:
   10811 	sc->phy.release(sc);
   10812 	return rv;
   10813 }
   10814 
   10815 /*
   10816  * wm_gmii_bm_readreg:	[mii interface function]
   10817  *
 *	Read a PHY register on the BM PHYs (82574/82583 and the 82567
 * family). This could be handled by the PHY layer if we didn't have to
 * lock the resource ...
   10821  */
   10822 static int
   10823 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10824 {
   10825 	struct wm_softc *sc = device_private(dev);
   10826 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10827 	int rv;
   10828 
   10829 	if (sc->phy.acquire(sc)) {
   10830 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10831 		return -1;
   10832 	}
   10833 
   10834 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10835 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10836 		    || (reg == 31)) ? 1 : phy;
   10837 	/* Page 800 works differently than the rest so it has its own func */
   10838 	if (page == BM_WUC_PAGE) {
   10839 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10840 		goto release;
   10841 	}
   10842 
   10843 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10844 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10845 		    && (sc->sc_type != WM_T_82583))
   10846 			rv = wm_gmii_mdic_writereg(dev, phy,
   10847 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10848 		else
   10849 			rv = wm_gmii_mdic_writereg(dev, phy,
   10850 			    BME1000_PHY_PAGE_SELECT, page);
   10851 		if (rv != 0)
   10852 			goto release;
   10853 	}
   10854 
   10855 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10856 
   10857 release:
   10858 	sc->phy.release(sc);
   10859 	return rv;
   10860 }
   10861 
   10862 /*
   10863  * wm_gmii_bm_writereg:	[mii interface function]
   10864  *
 *	Write a PHY register on the BM PHYs (82574/82583 and the 82567
 * family). This could be handled by the PHY layer if we didn't have to
 * lock the resource ...
   10868  */
   10869 static int
   10870 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10871 {
   10872 	struct wm_softc *sc = device_private(dev);
   10873 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10874 	int rv;
   10875 
   10876 	if (sc->phy.acquire(sc)) {
   10877 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10878 		return -1;
   10879 	}
   10880 
   10881 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10882 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10883 		    || (reg == 31)) ? 1 : phy;
   10884 	/* Page 800 works differently than the rest so it has its own func */
   10885 	if (page == BM_WUC_PAGE) {
   10886 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10887 		goto release;
   10888 	}
   10889 
   10890 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10891 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10892 		    && (sc->sc_type != WM_T_82583))
   10893 			rv = wm_gmii_mdic_writereg(dev, phy,
   10894 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10895 		else
   10896 			rv = wm_gmii_mdic_writereg(dev, phy,
   10897 			    BME1000_PHY_PAGE_SELECT, page);
   10898 		if (rv != 0)
   10899 			goto release;
   10900 	}
   10901 
   10902 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10903 
   10904 release:
   10905 	sc->phy.release(sc);
   10906 	return rv;
   10907 }
   10908 
   10909 /*
   10910  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10911  *  @dev: pointer to the HW structure
   10912  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10913  *
   10914  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10915  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10916  */
   10917 static int
   10918 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10919 {
   10920 	uint16_t temp;
   10921 	int rv;
   10922 
   10923 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10924 		device_xname(dev), __func__));
   10925 
   10926 	if (!phy_regp)
   10927 		return -1;
   10928 
   10929 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10930 
   10931 	/* Select Port Control Registers page */
   10932 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10933 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10934 	if (rv != 0)
   10935 		return rv;
   10936 
   10937 	/* Read WUCE and save it */
   10938 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10939 	if (rv != 0)
   10940 		return rv;
   10941 
	/*
	 * Enable both PHY wakeup mode and Wakeup register page writes.
	 * Prevent a power state change by disabling ME and Host PHY wakeup.
	 */
   10945 	temp = *phy_regp;
   10946 	temp |= BM_WUC_ENABLE_BIT;
   10947 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10948 
   10949 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10950 		return rv;
   10951 
	/*
	 * Select the Host Wakeup Registers page - the caller is now able
	 * to write registers on the Wakeup registers page.
	 */
   10955 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10956 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10957 }
   10958 
   10959 /*
   10960  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10961  *  @dev: pointer to the HW structure
   10962  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10963  *
   10964  *  Restore BM_WUC_ENABLE_REG to its original value.
   10965  *
   10966  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10967  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10968  *  caller.
   10969  */
   10970 static int
   10971 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10972 {
   10973 
   10974 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10975 		device_xname(dev), __func__));
   10976 
   10977 	if (!phy_regp)
   10978 		return -1;
   10979 
   10980 	/* Select Port Control Registers page */
   10981 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10982 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10983 
   10984 	/* Restore 769.17 to its original value */
   10985 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10986 
   10987 	return 0;
   10988 }
   10989 
   10990 /*
   10991  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 *  @dev: pointer to the HW structure
   10993  *  @offset: register offset to be read or written
   10994  *  @val: pointer to the data to read or write
   10995  *  @rd: determines if operation is read or write
   10996  *  @page_set: BM_WUC_PAGE already set and access enabled
   10997  *
   10998  *  Read the PHY register at offset and store the retrieved information in
 *  data, or write data to the PHY register at offset.  Note that the
 *  procedure to access the PHY wakeup registers differs from reading
 *  the other PHY registers. It works as follows:
   11002  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11003  *  2) Set page to 800 for host (801 if we were manageability)
   11004  *  3) Write the address using the address opcode (0x11)
   11005  *  4) Read or write the data using the data opcode (0x12)
   11006  *  5) Restore 769.17.2 to its original value
   11007  *
   11008  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11009  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11010  *
   11011  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11012  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11013  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11014  */
   11015 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
	bool page_set)
   11018 {
   11019 	struct wm_softc *sc = device_private(dev);
   11020 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11021 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11022 	uint16_t wuce;
   11023 	int rv = 0;
   11024 
   11025 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11026 		device_xname(dev), __func__));
   11027 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11028 	if ((sc->sc_type == WM_T_PCH)
   11029 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11030 		device_printf(dev,
   11031 		    "Attempting to access page %d while gig enabled.\n", page);
   11032 	}
   11033 
   11034 	if (!page_set) {
   11035 		/* Enable access to PHY wakeup registers */
   11036 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11037 		if (rv != 0) {
   11038 			device_printf(dev,
   11039 			    "%s: Could not enable PHY wakeup reg access\n",
   11040 			    __func__);
   11041 			return rv;
   11042 		}
   11043 	}
   11044 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11045 		device_xname(sc->sc_dev), __func__, page, regnum));
   11046 
   11047 	/*
   11048 	 * 2) Access PHY wakeup register.
   11049 	 * See wm_access_phy_wakeup_reg_bm.
   11050 	 */
   11051 
   11052 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11053 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11054 	if (rv != 0)
   11055 		return rv;
   11056 
   11057 	if (rd) {
   11058 		/* Read the Wakeup register page value using opcode 0x12 */
   11059 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11060 	} else {
   11061 		/* Write the Wakeup register page value using opcode 0x12 */
   11062 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11063 	}
   11064 	if (rv != 0)
   11065 		return rv;
   11066 
   11067 	if (!page_set)
   11068 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11069 
   11070 	return rv;
   11071 }
   11072 
   11073 /*
   11074  * wm_gmii_hv_readreg:	[mii interface function]
   11075  *
 *	Read a PHY register on the PCH-family (HV) PHYs (82577, 82578,
 * 82579 and I217/I218). This could be handled by the PHY layer if we
 * didn't have to lock the resource ...
   11079  */
   11080 static int
   11081 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11082 {
   11083 	struct wm_softc *sc = device_private(dev);
   11084 	int rv;
   11085 
   11086 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11087 		device_xname(dev), __func__));
   11088 	if (sc->phy.acquire(sc)) {
   11089 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11090 		return -1;
   11091 	}
   11092 
   11093 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11094 	sc->phy.release(sc);
   11095 	return rv;
   11096 }
   11097 
   11098 static int
   11099 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11100 {
   11101 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11102 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11103 	int rv;
   11104 
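	/* Registers on page 768 and above live at PHY address 1. */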
   11105 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11106 
   11107 	/* Page 800 works differently than the rest so it has its own func */
   11108 	if (page == BM_WUC_PAGE)
   11109 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11110 
   11111 	/*
   11112 	 * Lower than page 768 works differently than the rest so it has its
   11113 	 * own func
   11114 	 */
   11115 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11116 		printf("gmii_hv_readreg!!!\n");
   11117 		return -1;
   11118 	}
   11119 
   11120 	/*
   11121 	 * XXX I21[789] documents say that the SMBus Address register is at
   11122 	 * PHY address 01, Page 0 (not 768), Register 26.
   11123 	 */
   11124 	if (page == HV_INTC_FC_PAGE_START)
   11125 		page = 0;
   11126 
   11127 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11128 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11129 		    page << BME1000_PAGE_SHIFT);
   11130 		if (rv != 0)
   11131 			return rv;
   11132 	}
   11133 
   11134 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11135 }
   11136 
   11137 /*
   11138  * wm_gmii_hv_writereg:	[mii interface function]
   11139  *
 *	Write a PHY register on the PCH-family (HV) PHYs (82577, 82578,
 * 82579 and I217/I218). This could be handled by the PHY layer if we
 * didn't have to lock the resource ...
   11143  */
   11144 static int
   11145 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11146 {
   11147 	struct wm_softc *sc = device_private(dev);
   11148 	int rv;
   11149 
   11150 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11151 		device_xname(dev), __func__));
   11152 
   11153 	if (sc->phy.acquire(sc)) {
   11154 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11155 		return -1;
   11156 	}
   11157 
   11158 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11159 	sc->phy.release(sc);
   11160 
   11161 	return rv;
   11162 }
   11163 
   11164 static int
   11165 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11166 {
   11167 	struct wm_softc *sc = device_private(dev);
   11168 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11169 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11170 	int rv;
   11171 
   11172 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11173 
   11174 	/* Page 800 works differently than the rest so it has its own func */
   11175 	if (page == BM_WUC_PAGE)
   11176 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11177 		    false);
   11178 
	/*
	 * Pages below 768 (other than page 0) work differently than the
	 * rest and are not handled here yet.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "%s: page %d not supported\n", __func__,
		    page);
		return -1;
	}
   11187 
   11188 	{
   11189 		/*
   11190 		 * XXX I21[789] documents say that the SMBus Address register
   11191 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11192 		 */
   11193 		if (page == HV_INTC_FC_PAGE_START)
   11194 			page = 0;
   11195 
   11196 		/*
   11197 		 * XXX Workaround MDIO accesses being disabled after entering
   11198 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11199 		 * register is set)
   11200 		 */
   11201 		if (sc->sc_phytype == WMPHY_82578) {
   11202 			struct mii_softc *child;
   11203 
   11204 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11205 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11206 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11207 			    && ((val & (1 << 11)) != 0)) {
				device_printf(dev,
				    "%s: XXX power-down workaround needed\n",
				    __func__);
   11209 			}
   11210 		}
   11211 
   11212 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11213 			rv = wm_gmii_mdic_writereg(dev, 1,
   11214 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11215 			if (rv != 0)
   11216 				return rv;
   11217 		}
   11218 	}
   11219 
   11220 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11221 }
   11222 
   11223 /*
   11224  * wm_gmii_82580_readreg:	[mii interface function]
   11225  *
   11226  *	Read a PHY register on the 82580 and I350.
   11227  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11229  */
   11230 static int
   11231 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11232 {
   11233 	struct wm_softc *sc = device_private(dev);
   11234 	int rv;
   11235 
   11236 	if (sc->phy.acquire(sc) != 0) {
   11237 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11238 		return -1;
   11239 	}
   11240 
   11241 #ifdef DIAGNOSTIC
   11242 	if (reg > MII_ADDRMASK) {
   11243 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11244 		    __func__, sc->sc_phytype, reg);
   11245 		reg &= MII_ADDRMASK;
   11246 	}
   11247 #endif
   11248 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11249 
   11250 	sc->phy.release(sc);
   11251 	return rv;
   11252 }
   11253 
   11254 /*
   11255  * wm_gmii_82580_writereg:	[mii interface function]
   11256  *
   11257  *	Write a PHY register on the 82580 and I350.
   11258  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11260  */
   11261 static int
   11262 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11263 {
   11264 	struct wm_softc *sc = device_private(dev);
   11265 	int rv;
   11266 
   11267 	if (sc->phy.acquire(sc) != 0) {
   11268 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11269 		return -1;
   11270 	}
   11271 
   11272 #ifdef DIAGNOSTIC
   11273 	if (reg > MII_ADDRMASK) {
   11274 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11275 		    __func__, sc->sc_phytype, reg);
   11276 		reg &= MII_ADDRMASK;
   11277 	}
   11278 #endif
   11279 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11280 
   11281 	sc->phy.release(sc);
   11282 	return rv;
   11283 }
   11284 
   11285 /*
   11286  * wm_gmii_gs40g_readreg:	[mii interface function]
   11287  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11291  */
   11292 static int
   11293 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11294 {
   11295 	struct wm_softc *sc = device_private(dev);
   11296 	int page, offset;
   11297 	int rv;
   11298 
   11299 	/* Acquire semaphore */
   11300 	if (sc->phy.acquire(sc)) {
   11301 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11302 		return -1;
   11303 	}
   11304 
   11305 	/* Page select */
   11306 	page = reg >> GS40G_PAGE_SHIFT;
   11307 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11308 	if (rv != 0)
   11309 		goto release;
   11310 
   11311 	/* Read reg */
   11312 	offset = reg & GS40G_OFFSET_MASK;
   11313 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11314 
   11315 release:
   11316 	sc->phy.release(sc);
   11317 	return rv;
   11318 }
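
/*
 * Illustrative sketch (not part of the original driver): how a GS40G
 * register address is composed for the accessors above; page 2 and
 * register 21 are arbitrary example values.
 */
#if 0
static int
wm_gs40g_read_example(device_t dev, int phy, uint16_t *valp)
{

	return wm_gmii_gs40g_readreg(dev, phy,
	    (2 << GS40G_PAGE_SHIFT) | (21 & GS40G_OFFSET_MASK), valp);
}
#endif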
   11319 
   11320 /*
   11321  * wm_gmii_gs40g_writereg:	[mii interface function]
   11322  *
   11323  *	Write a PHY register on the I210 and I211.
   11324  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11326  */
   11327 static int
   11328 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11329 {
   11330 	struct wm_softc *sc = device_private(dev);
   11331 	uint16_t page;
   11332 	int offset, rv;
   11333 
   11334 	/* Acquire semaphore */
   11335 	if (sc->phy.acquire(sc)) {
   11336 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11337 		return -1;
   11338 	}
   11339 
   11340 	/* Page select */
   11341 	page = reg >> GS40G_PAGE_SHIFT;
   11342 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11343 	if (rv != 0)
   11344 		goto release;
   11345 
   11346 	/* Write reg */
   11347 	offset = reg & GS40G_OFFSET_MASK;
   11348 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11349 
   11350 release:
   11351 	/* Release semaphore */
   11352 	sc->phy.release(sc);
   11353 	return rv;
   11354 }
   11355 
   11356 /*
   11357  * wm_gmii_statchg:	[mii interface function]
   11358  *
   11359  *	Callback from MII layer when media changes.
   11360  */
   11361 static void
   11362 wm_gmii_statchg(struct ifnet *ifp)
   11363 {
   11364 	struct wm_softc *sc = ifp->if_softc;
   11365 	struct mii_data *mii = &sc->sc_mii;
   11366 
   11367 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11368 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11369 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11370 
   11371 	/* Get flow control negotiation result. */
   11372 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11373 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11374 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11375 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11376 	}
   11377 
   11378 	if (sc->sc_flowflags & IFM_FLOW) {
   11379 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11380 			sc->sc_ctrl |= CTRL_TFCE;
   11381 			sc->sc_fcrtl |= FCRTL_XONE;
   11382 		}
   11383 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11384 			sc->sc_ctrl |= CTRL_RFCE;
   11385 	}
   11386 
   11387 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11388 		DPRINTF(WM_DEBUG_LINK,
   11389 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11390 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11391 	} else {
   11392 		DPRINTF(WM_DEBUG_LINK,
   11393 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11394 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11395 	}
   11396 
   11397 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11398 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11399 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11400 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11401 	if (sc->sc_type == WM_T_80003) {
   11402 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11403 		case IFM_1000_T:
   11404 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11405 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11407 			break;
   11408 		default:
   11409 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11410 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11412 			break;
   11413 		}
   11414 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11415 	}
   11416 }
   11417 
   11418 /* kumeran related (80003, ICH* and PCH*) */
   11419 
   11420 /*
   11421  * wm_kmrn_readreg:
   11422  *
   11423  *	Read a kumeran register
   11424  */
   11425 static int
   11426 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11427 {
   11428 	int rv;
   11429 
   11430 	if (sc->sc_type == WM_T_80003)
   11431 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11432 	else
   11433 		rv = sc->phy.acquire(sc);
   11434 	if (rv != 0) {
   11435 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11436 		    __func__);
   11437 		return rv;
   11438 	}
   11439 
   11440 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11441 
   11442 	if (sc->sc_type == WM_T_80003)
   11443 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11444 	else
   11445 		sc->phy.release(sc);
   11446 
   11447 	return rv;
   11448 }
   11449 
   11450 static int
   11451 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11452 {
   11453 
   11454 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11455 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11456 	    KUMCTRLSTA_REN);
   11457 	CSR_WRITE_FLUSH(sc);
   11458 	delay(2);
   11459 
   11460 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11461 
   11462 	return 0;
   11463 }
   11464 
   11465 /*
   11466  * wm_kmrn_writereg:
   11467  *
   11468  *	Write a kumeran register
   11469  */
   11470 static int
   11471 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11472 {
   11473 	int rv;
   11474 
   11475 	if (sc->sc_type == WM_T_80003)
   11476 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11477 	else
   11478 		rv = sc->phy.acquire(sc);
   11479 	if (rv != 0) {
   11480 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11481 		    __func__);
   11482 		return rv;
   11483 	}
   11484 
   11485 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11486 
   11487 	if (sc->sc_type == WM_T_80003)
   11488 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11489 	else
   11490 		sc->phy.release(sc);
   11491 
   11492 	return rv;
   11493 }
   11494 
   11495 static int
   11496 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11497 {
   11498 
   11499 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11500 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11501 
   11502 	return 0;
   11503 }
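
/*
 * Illustrative sketch (not part of the original driver): a
 * read-modify-write of a Kumeran register using the locked helpers
 * above.  The caller is assumed to already hold the MAC_CSR semaphore
 * or the PHY lock, as in wm_kmrn_readreg()/wm_kmrn_writereg().
 */
#if 0
static int
wm_kmrn_rmw_example(struct wm_softc *sc, int reg, uint16_t setbits)
{
	uint16_t data;
	int rv;

	rv = wm_kmrn_readreg_locked(sc, reg, &data);
	if (rv != 0)
		return rv;
	return wm_kmrn_writereg_locked(sc, reg, data | setbits);
}
#endif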
   11504 
   11505 /*
   11506  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11507  * This access method is different from IEEE MMD.
   11508  */
   11509 static int
   11510 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11511 {
   11512 	struct wm_softc *sc = device_private(dev);
   11513 	int rv;
   11514 
   11515 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11516 	if (rv != 0)
   11517 		return rv;
   11518 
   11519 	if (rd)
   11520 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11521 	else
   11522 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11523 	return rv;
   11524 }
   11525 
   11526 static int
   11527 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11528 {
   11529 
   11530 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11531 }
   11532 
   11533 static int
   11534 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11535 {
   11536 
   11537 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11538 }
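
/*
 * Illustrative sketch (not part of the original driver): setting a bit
 * in an EMI register with the helpers above.  I82579_EXAMPLE_REG is a
 * hypothetical EMI register offset; the PHY lock must be held.
 */
#if 0
static int
wm_emi_set_example(device_t dev)
{
	uint16_t data;
	int rv;

	rv = wm_read_emi_reg_locked(dev, I82579_EXAMPLE_REG, &data);
	if (rv != 0)
		return rv;
	data |= __BIT(0);
	return wm_write_emi_reg_locked(dev, I82579_EXAMPLE_REG, data);
}
#endif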
   11539 
   11540 /* SGMII related */
   11541 
   11542 /*
   11543  * wm_sgmii_uses_mdio
   11544  *
   11545  * Check whether the transaction is to the internal PHY or the external
   11546  * MDIO interface. Return true if it's MDIO.
   11547  */
   11548 static bool
   11549 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11550 {
   11551 	uint32_t reg;
   11552 	bool ismdio = false;
   11553 
   11554 	switch (sc->sc_type) {
   11555 	case WM_T_82575:
   11556 	case WM_T_82576:
   11557 		reg = CSR_READ(sc, WMREG_MDIC);
   11558 		ismdio = ((reg & MDIC_DEST) != 0);
   11559 		break;
   11560 	case WM_T_82580:
   11561 	case WM_T_I350:
   11562 	case WM_T_I354:
   11563 	case WM_T_I210:
   11564 	case WM_T_I211:
   11565 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11566 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11567 		break;
   11568 	default:
   11569 		break;
   11570 	}
   11571 
   11572 	return ismdio;
   11573 }
   11574 
   11575 /*
   11576  * wm_sgmii_readreg:	[mii interface function]
   11577  *
   11578  *	Read a PHY register on the SGMII
   11579  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11581  */
   11582 static int
   11583 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11584 {
   11585 	struct wm_softc *sc = device_private(dev);
   11586 	int rv;
   11587 
   11588 	if (sc->phy.acquire(sc)) {
   11589 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11590 		return -1;
   11591 	}
   11592 
   11593 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11594 
   11595 	sc->phy.release(sc);
   11596 	return rv;
   11597 }
   11598 
   11599 static int
   11600 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11601 {
   11602 	struct wm_softc *sc = device_private(dev);
   11603 	uint32_t i2ccmd;
	int i, rv = 0;
   11605 
   11606 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11607 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11608 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11609 
   11610 	/* Poll the ready bit */
   11611 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11612 		delay(50);
   11613 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11614 		if (i2ccmd & I2CCMD_READY)
   11615 			break;
   11616 	}
   11617 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11618 		device_printf(dev, "I2CCMD Read did not complete\n");
   11619 		rv = ETIMEDOUT;
   11620 	}
   11621 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11622 		device_printf(dev, "I2CCMD Error bit set\n");
   11623 		rv = EIO;
   11624 	}
   11625 
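	/* Swap the data bytes back from I2C (big-endian) order */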
   11626 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11627 
   11628 	return rv;
   11629 }
   11630 
   11631 /*
   11632  * wm_sgmii_writereg:	[mii interface function]
   11633  *
   11634  *	Write a PHY register on the SGMII.
   11635  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11637  */
   11638 static int
   11639 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11640 {
   11641 	struct wm_softc *sc = device_private(dev);
   11642 	int rv;
   11643 
   11644 	if (sc->phy.acquire(sc) != 0) {
   11645 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11646 		return -1;
   11647 	}
   11648 
   11649 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11650 
   11651 	sc->phy.release(sc);
   11652 
   11653 	return rv;
   11654 }
   11655 
   11656 static int
   11657 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11658 {
   11659 	struct wm_softc *sc = device_private(dev);
   11660 	uint32_t i2ccmd;
   11661 	uint16_t swapdata;
   11662 	int rv = 0;
   11663 	int i;
   11664 
   11665 	/* Swap the data bytes for the I2C interface */
   11666 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11667 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11668 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11669 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11670 
   11671 	/* Poll the ready bit */
   11672 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11673 		delay(50);
   11674 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11675 		if (i2ccmd & I2CCMD_READY)
   11676 			break;
   11677 	}
   11678 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11679 		device_printf(dev, "I2CCMD Write did not complete\n");
   11680 		rv = ETIMEDOUT;
   11681 	}
   11682 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11683 		device_printf(dev, "I2CCMD Error bit set\n");
   11684 		rv = EIO;
   11685 	}
   11686 
   11687 	return rv;
   11688 }
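
/*
 * Worked example of the byte swap above (illustrative): for
 * val = 0x1234, swapdata = ((0x1234 >> 8) & 0x00FF) |
 * ((0x1234 << 8) & 0xFF00) = 0x0012 | 0x3400 = 0x3412, so the most
 * significant byte goes out first on the I2C wire.  The read path in
 * wm_sgmii_readreg_locked() applies the same swap to undo it.
 */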
   11689 
   11690 /* TBI related */
   11691 
   11692 static bool
   11693 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11694 {
   11695 	bool sig;
   11696 
	sig = (ctrl & CTRL_SWDPIN(1)) != 0;
   11698 
   11699 	/*
   11700 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11701 	 * detect a signal, 1 if they don't.
   11702 	 */
   11703 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11704 		sig = !sig;
   11705 
   11706 	return sig;
   11707 }
   11708 
   11709 /*
   11710  * wm_tbi_mediainit:
   11711  *
   11712  *	Initialize media for use on 1000BASE-X devices.
   11713  */
   11714 static void
   11715 wm_tbi_mediainit(struct wm_softc *sc)
   11716 {
   11717 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11718 	const char *sep = "";
   11719 
   11720 	if (sc->sc_type < WM_T_82543)
   11721 		sc->sc_tipg = TIPG_WM_DFLT;
   11722 	else
   11723 		sc->sc_tipg = TIPG_LG_DFLT;
   11724 
   11725 	sc->sc_tbi_serdes_anegticks = 5;
   11726 
   11727 	/* Initialize our media structures */
   11728 	sc->sc_mii.mii_ifp = ifp;
   11729 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11730 
   11731 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11732 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11733 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11734 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11735 	else
   11736 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11737 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11738 
   11739 	/*
   11740 	 * SWD Pins:
   11741 	 *
   11742 	 *	0 = Link LED (output)
   11743 	 *	1 = Loss Of Signal (input)
   11744 	 */
   11745 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11746 
   11747 	/* XXX Perhaps this is only for TBI */
   11748 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11749 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11750 
   11751 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11752 		sc->sc_ctrl &= ~CTRL_LRST;
   11753 
   11754 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11755 
   11756 #define	ADD(ss, mm, dd)							\
   11757 do {									\
   11758 	aprint_normal("%s%s", sep, ss);					\
   11759 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11760 	sep = ", ";							\
   11761 } while (/*CONSTCOND*/0)
   11762 
   11763 	aprint_normal_dev(sc->sc_dev, "");
   11764 
   11765 	if (sc->sc_type == WM_T_I354) {
   11766 		uint32_t status;
   11767 
   11768 		status = CSR_READ(sc, WMREG_STATUS);
   11769 		if (((status & STATUS_2P5_SKU) != 0)
   11770 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11771 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11772 		} else
   11773 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11774 	} else if (sc->sc_type == WM_T_82545) {
   11775 		/* Only 82545 is LX (XXX except SFP) */
   11776 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11777 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11778 	} else {
   11779 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11780 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11781 	}
   11782 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11783 	aprint_normal("\n");
   11784 
   11785 #undef ADD
   11786 
   11787 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11788 }
   11789 
   11790 /*
   11791  * wm_tbi_mediachange:	[ifmedia interface function]
   11792  *
   11793  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11794  */
   11795 static int
   11796 wm_tbi_mediachange(struct ifnet *ifp)
   11797 {
   11798 	struct wm_softc *sc = ifp->if_softc;
   11799 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11800 	uint32_t status, ctrl;
   11801 	bool signal;
   11802 	int i;
   11803 
   11804 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11805 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11806 		/* XXX need some work for >= 82571 and < 82575 */
   11807 		if (sc->sc_type < WM_T_82575)
   11808 			return 0;
   11809 	}
   11810 
   11811 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11812 	    || (sc->sc_type >= WM_T_82575))
   11813 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11814 
   11815 	sc->sc_ctrl &= ~CTRL_LRST;
   11816 	sc->sc_txcw = TXCW_ANE;
   11817 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11818 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11819 	else if (ife->ifm_media & IFM_FDX)
   11820 		sc->sc_txcw |= TXCW_FD;
   11821 	else
   11822 		sc->sc_txcw |= TXCW_HD;
   11823 
   11824 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11825 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11826 
   11827 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11828 		device_xname(sc->sc_dev), sc->sc_txcw));
   11829 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11830 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11831 	CSR_WRITE_FLUSH(sc);
   11832 	delay(1000);
   11833 
	ctrl = CSR_READ(sc, WMREG_CTRL);
   11835 	signal = wm_tbi_havesignal(sc, ctrl);
   11836 
   11837 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11838 		signal));
   11839 
   11840 	if (signal) {
   11841 		/* Have signal; wait for the link to come up. */
   11842 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11843 			delay(10000);
   11844 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11845 				break;
   11846 		}
   11847 
   11848 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   11849 			device_xname(sc->sc_dev), i));
   11850 
   11851 		status = CSR_READ(sc, WMREG_STATUS);
   11852 		DPRINTF(WM_DEBUG_LINK,
   11853 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11854 			device_xname(sc->sc_dev), status, STATUS_LU));
   11855 		if (status & STATUS_LU) {
   11856 			/* Link is up. */
   11857 			DPRINTF(WM_DEBUG_LINK,
   11858 			    ("%s: LINK: set media -> link up %s\n",
   11859 				device_xname(sc->sc_dev),
   11860 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11861 
   11862 			/*
   11863 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11864 			 * so we should update sc->sc_ctrl
   11865 			 */
   11866 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11867 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11868 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11869 			if (status & STATUS_FD)
   11870 				sc->sc_tctl |=
   11871 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11872 			else
   11873 				sc->sc_tctl |=
   11874 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11875 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11876 				sc->sc_fcrtl |= FCRTL_XONE;
   11877 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11878 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11879 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11880 			sc->sc_tbi_linkup = 1;
   11881 		} else {
   11882 			if (i == WM_LINKUP_TIMEOUT)
   11883 				wm_check_for_link(sc);
   11884 			/* Link is down. */
   11885 			DPRINTF(WM_DEBUG_LINK,
   11886 			    ("%s: LINK: set media -> link down\n",
   11887 				device_xname(sc->sc_dev)));
   11888 			sc->sc_tbi_linkup = 0;
   11889 		}
   11890 	} else {
   11891 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11892 			device_xname(sc->sc_dev)));
   11893 		sc->sc_tbi_linkup = 0;
   11894 	}
   11895 
   11896 	wm_tbi_serdes_set_linkled(sc);
   11897 
   11898 	return 0;
   11899 }
   11900 
   11901 /*
   11902  * wm_tbi_mediastatus:	[ifmedia interface function]
   11903  *
   11904  *	Get the current interface media status on a 1000BASE-X device.
   11905  */
   11906 static void
   11907 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11908 {
   11909 	struct wm_softc *sc = ifp->if_softc;
   11910 	uint32_t ctrl, status;
   11911 
   11912 	ifmr->ifm_status = IFM_AVALID;
   11913 	ifmr->ifm_active = IFM_ETHER;
   11914 
   11915 	status = CSR_READ(sc, WMREG_STATUS);
   11916 	if ((status & STATUS_LU) == 0) {
   11917 		ifmr->ifm_active |= IFM_NONE;
   11918 		return;
   11919 	}
   11920 
   11921 	ifmr->ifm_status |= IFM_ACTIVE;
   11922 	/* Only 82545 is LX */
   11923 	if (sc->sc_type == WM_T_82545)
   11924 		ifmr->ifm_active |= IFM_1000_LX;
   11925 	else
   11926 		ifmr->ifm_active |= IFM_1000_SX;
   11927 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11928 		ifmr->ifm_active |= IFM_FDX;
   11929 	else
   11930 		ifmr->ifm_active |= IFM_HDX;
   11931 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11932 	if (ctrl & CTRL_RFCE)
   11933 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11934 	if (ctrl & CTRL_TFCE)
   11935 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11936 }
   11937 
   11938 /* XXX TBI only */
   11939 static int
   11940 wm_check_for_link(struct wm_softc *sc)
   11941 {
   11942 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11943 	uint32_t rxcw;
   11944 	uint32_t ctrl;
   11945 	uint32_t status;
   11946 	bool signal;
   11947 
   11948 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11949 		device_xname(sc->sc_dev), __func__));
   11950 
   11951 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11952 		/* XXX need some work for >= 82571 */
   11953 		if (sc->sc_type >= WM_T_82571) {
   11954 			sc->sc_tbi_linkup = 1;
   11955 			return 0;
   11956 		}
   11957 	}
   11958 
   11959 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11960 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11961 	status = CSR_READ(sc, WMREG_STATUS);
   11962 	signal = wm_tbi_havesignal(sc, ctrl);
   11963 
   11964 	DPRINTF(WM_DEBUG_LINK,
   11965 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11966 		device_xname(sc->sc_dev), __func__, signal,
   11967 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11968 
   11969 	/*
   11970 	 * SWDPIN   LU RXCW
   11971 	 *	0    0	  0
   11972 	 *	0    0	  1	(should not happen)
   11973 	 *	0    1	  0	(should not happen)
   11974 	 *	0    1	  1	(should not happen)
   11975 	 *	1    0	  0	Disable autonego and force linkup
   11976 	 *	1    0	  1	got /C/ but not linkup yet
   11977 	 *	1    1	  0	(linkup)
   11978 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11979 	 *
   11980 	 */
   11981 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11982 		DPRINTF(WM_DEBUG_LINK,
   11983 		    ("%s: %s: force linkup and fullduplex\n",
   11984 			device_xname(sc->sc_dev), __func__));
   11985 		sc->sc_tbi_linkup = 0;
   11986 		/* Disable auto-negotiation in the TXCW register */
   11987 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11988 
   11989 		/*
   11990 		 * Force link-up and also force full-duplex.
   11991 		 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl
   11994 		 */
   11995 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11996 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11997 	} else if (((status & STATUS_LU) != 0)
   11998 	    && ((rxcw & RXCW_C) != 0)
   11999 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12000 		sc->sc_tbi_linkup = 1;
   12001 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12002 			device_xname(sc->sc_dev),
   12003 			__func__));
   12004 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12005 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12006 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12007 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12008 			device_xname(sc->sc_dev), __func__));
   12009 	} else {
   12010 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12011 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12012 			status));
   12013 	}
   12014 
   12015 	return 0;
   12016 }
   12017 
   12018 /*
   12019  * wm_tbi_tick:
   12020  *
   12021  *	Check the link on TBI devices.
   12022  *	This function acts as mii_tick().
   12023  */
   12024 static void
   12025 wm_tbi_tick(struct wm_softc *sc)
   12026 {
   12027 	struct mii_data *mii = &sc->sc_mii;
   12028 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12029 	uint32_t status;
   12030 
   12031 	KASSERT(WM_CORE_LOCKED(sc));
   12032 
   12033 	status = CSR_READ(sc, WMREG_STATUS);
   12034 
   12035 	/* XXX is this needed? */
   12036 	(void)CSR_READ(sc, WMREG_RXCW);
   12037 	(void)CSR_READ(sc, WMREG_CTRL);
   12038 
   12039 	/* set link status */
   12040 	if ((status & STATUS_LU) == 0) {
   12041 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12042 			device_xname(sc->sc_dev)));
   12043 		sc->sc_tbi_linkup = 0;
   12044 	} else if (sc->sc_tbi_linkup == 0) {
   12045 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12046 			device_xname(sc->sc_dev),
   12047 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12048 		sc->sc_tbi_linkup = 1;
   12049 		sc->sc_tbi_serdes_ticks = 0;
   12050 	}
   12051 
   12052 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12053 		goto setled;
   12054 
   12055 	if ((status & STATUS_LU) == 0) {
   12056 		sc->sc_tbi_linkup = 0;
   12057 		/* If the timer expired, retry autonegotiation */
   12058 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12059 		    && (++sc->sc_tbi_serdes_ticks
   12060 			>= sc->sc_tbi_serdes_anegticks)) {
   12061 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12062 			sc->sc_tbi_serdes_ticks = 0;
   12063 			/*
   12064 			 * Reset the link, and let autonegotiation do
   12065 			 * its thing
   12066 			 */
   12067 			sc->sc_ctrl |= CTRL_LRST;
   12068 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12069 			CSR_WRITE_FLUSH(sc);
   12070 			delay(1000);
   12071 			sc->sc_ctrl &= ~CTRL_LRST;
   12072 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12073 			CSR_WRITE_FLUSH(sc);
   12074 			delay(1000);
   12075 			CSR_WRITE(sc, WMREG_TXCW,
   12076 			    sc->sc_txcw & ~TXCW_ANE);
   12077 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12078 		}
   12079 	}
   12080 
   12081 setled:
   12082 	wm_tbi_serdes_set_linkled(sc);
   12083 }
   12084 
   12085 /* SERDES related */
   12086 static void
   12087 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12088 {
   12089 	uint32_t reg;
   12090 
   12091 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12092 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12093 		return;
   12094 
   12095 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12096 	reg |= PCS_CFG_PCS_EN;
   12097 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12098 
   12099 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12100 	reg &= ~CTRL_EXT_SWDPIN(3);
   12101 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12102 	CSR_WRITE_FLUSH(sc);
   12103 }
   12104 
   12105 static int
   12106 wm_serdes_mediachange(struct ifnet *ifp)
   12107 {
   12108 	struct wm_softc *sc = ifp->if_softc;
   12109 	bool pcs_autoneg = true; /* XXX */
   12110 	uint32_t ctrl_ext, pcs_lctl, reg;
   12111 
   12112 	/* XXX Currently, this function is not called on 8257[12] */
   12113 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12114 	    || (sc->sc_type >= WM_T_82575))
   12115 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12116 
   12117 	wm_serdes_power_up_link_82575(sc);
   12118 
   12119 	sc->sc_ctrl |= CTRL_SLU;
   12120 
   12121 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12122 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12123 
   12124 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12125 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12126 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12127 	case CTRL_EXT_LINK_MODE_SGMII:
   12128 		pcs_autoneg = true;
   12129 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12130 		break;
   12131 	case CTRL_EXT_LINK_MODE_1000KX:
   12132 		pcs_autoneg = false;
   12133 		/* FALLTHROUGH */
   12134 	default:
   12135 		if ((sc->sc_type == WM_T_82575)
   12136 		    || (sc->sc_type == WM_T_82576)) {
   12137 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12138 				pcs_autoneg = false;
   12139 		}
   12140 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12141 		    | CTRL_FRCFDX;
   12142 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12143 	}
   12144 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12145 
   12146 	if (pcs_autoneg) {
   12147 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12148 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12149 
   12150 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12151 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12152 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12153 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12154 	} else
   12155 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12156 
   12157 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12158 
   12160 	return 0;
   12161 }
   12162 
   12163 static void
   12164 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12165 {
   12166 	struct wm_softc *sc = ifp->if_softc;
   12167 	struct mii_data *mii = &sc->sc_mii;
   12168 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12169 	uint32_t pcs_adv, pcs_lpab, reg;
   12170 
   12171 	ifmr->ifm_status = IFM_AVALID;
   12172 	ifmr->ifm_active = IFM_ETHER;
   12173 
   12174 	/* Check PCS */
   12175 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12176 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12177 		ifmr->ifm_active |= IFM_NONE;
   12178 		sc->sc_tbi_linkup = 0;
   12179 		goto setled;
   12180 	}
   12181 
   12182 	sc->sc_tbi_linkup = 1;
   12183 	ifmr->ifm_status |= IFM_ACTIVE;
   12184 	if (sc->sc_type == WM_T_I354) {
   12185 		uint32_t status;
   12186 
   12187 		status = CSR_READ(sc, WMREG_STATUS);
   12188 		if (((status & STATUS_2P5_SKU) != 0)
   12189 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12190 			ifmr->ifm_active |= IFM_2500_KX;
   12191 		} else
   12192 			ifmr->ifm_active |= IFM_1000_KX;
   12193 	} else {
   12194 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12195 		case PCS_LSTS_SPEED_10:
   12196 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12197 			break;
   12198 		case PCS_LSTS_SPEED_100:
   12199 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12200 			break;
   12201 		case PCS_LSTS_SPEED_1000:
   12202 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12203 			break;
   12204 		default:
   12205 			device_printf(sc->sc_dev, "Unknown speed\n");
   12206 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12207 			break;
   12208 		}
   12209 	}
   12210 	if ((reg & PCS_LSTS_FDX) != 0)
   12211 		ifmr->ifm_active |= IFM_FDX;
   12212 	else
   12213 		ifmr->ifm_active |= IFM_HDX;
   12214 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12215 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12216 		/* Check flow */
   12217 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12218 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12219 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12220 			goto setled;
   12221 		}
   12222 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12223 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12224 		DPRINTF(WM_DEBUG_LINK,
   12225 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12226 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12227 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12228 			mii->mii_media_active |= IFM_FLOW
   12229 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12230 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12231 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12232 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12233 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12234 			mii->mii_media_active |= IFM_FLOW
   12235 			    | IFM_ETH_TXPAUSE;
   12236 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12237 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12238 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12239 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12240 			mii->mii_media_active |= IFM_FLOW
   12241 			    | IFM_ETH_RXPAUSE;
   12242 		}
   12243 	}
   12244 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12245 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12246 setled:
   12247 	wm_tbi_serdes_set_linkled(sc);
   12248 }
   12249 
   12250 /*
   12251  * wm_serdes_tick:
   12252  *
   12253  *	Check the link on serdes devices.
   12254  */
   12255 static void
   12256 wm_serdes_tick(struct wm_softc *sc)
   12257 {
   12258 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12259 	struct mii_data *mii = &sc->sc_mii;
   12260 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12261 	uint32_t reg;
   12262 
   12263 	KASSERT(WM_CORE_LOCKED(sc));
   12264 
   12265 	mii->mii_media_status = IFM_AVALID;
   12266 	mii->mii_media_active = IFM_ETHER;
   12267 
   12268 	/* Check PCS */
   12269 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12270 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12271 		mii->mii_media_status |= IFM_ACTIVE;
   12272 		sc->sc_tbi_linkup = 1;
   12273 		sc->sc_tbi_serdes_ticks = 0;
   12274 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12275 		if ((reg & PCS_LSTS_FDX) != 0)
   12276 			mii->mii_media_active |= IFM_FDX;
   12277 		else
   12278 			mii->mii_media_active |= IFM_HDX;
   12279 	} else {
		mii->mii_media_active |= IFM_NONE;
   12281 		sc->sc_tbi_linkup = 0;
   12282 		/* If the timer expired, retry autonegotiation */
   12283 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12284 		    && (++sc->sc_tbi_serdes_ticks
   12285 			>= sc->sc_tbi_serdes_anegticks)) {
   12286 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12287 			sc->sc_tbi_serdes_ticks = 0;
   12288 			/* XXX */
   12289 			wm_serdes_mediachange(ifp);
   12290 		}
   12291 	}
   12292 
   12293 	wm_tbi_serdes_set_linkled(sc);
   12294 }
   12295 
   12296 /* SFP related */
   12297 
   12298 static int
   12299 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12300 {
   12301 	uint32_t i2ccmd;
   12302 	int i;
   12303 
   12304 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12305 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12306 
   12307 	/* Poll the ready bit */
   12308 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12309 		delay(50);
   12310 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12311 		if (i2ccmd & I2CCMD_READY)
   12312 			break;
   12313 	}
   12314 	if ((i2ccmd & I2CCMD_READY) == 0)
   12315 		return -1;
   12316 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12317 		return -1;
   12318 
   12319 	*data = i2ccmd & 0x00ff;
   12320 
   12321 	return 0;
   12322 }
   12323 
   12324 static uint32_t
   12325 wm_sfp_get_media_type(struct wm_softc *sc)
   12326 {
   12327 	uint32_t ctrl_ext;
   12328 	uint8_t val = 0;
   12329 	int timeout = 3;
   12330 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12331 	int rv = -1;
   12332 
   12333 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12334 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12335 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12336 	CSR_WRITE_FLUSH(sc);
   12337 
   12338 	/* Read SFP module data */
   12339 	while (timeout) {
   12340 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12341 		if (rv == 0)
   12342 			break;
   12343 		delay(100*1000); /* XXX too big */
   12344 		timeout--;
   12345 	}
   12346 	if (rv != 0)
   12347 		goto out;
   12348 	switch (val) {
   12349 	case SFF_SFP_ID_SFF:
   12350 		aprint_normal_dev(sc->sc_dev,
   12351 		    "Module/Connector soldered to board\n");
   12352 		break;
   12353 	case SFF_SFP_ID_SFP:
   12354 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12355 		break;
   12356 	case SFF_SFP_ID_UNKNOWN:
   12357 		goto out;
   12358 	default:
   12359 		break;
   12360 	}
   12361 
   12362 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
	if (rv != 0)
		goto out;
   12366 
   12367 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12368 		mediatype = WM_MEDIATYPE_SERDES;
   12369 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12370 		sc->sc_flags |= WM_F_SGMII;
   12371 		mediatype = WM_MEDIATYPE_COPPER;
   12372 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12373 		sc->sc_flags |= WM_F_SGMII;
   12374 		mediatype = WM_MEDIATYPE_SERDES;
   12375 	}
   12376 
   12377 out:
   12378 	/* Restore I2C interface setting */
   12379 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12380 
   12381 	return mediatype;
   12382 }
   12383 
   12384 /*
   12385  * NVM related.
   12386  * Microwire, SPI (w/wo EERD) and Flash.
   12387  */
   12388 
   12389 /* Both spi and uwire */
   12390 
   12391 /*
   12392  * wm_eeprom_sendbits:
   12393  *
   12394  *	Send a series of bits to the EEPROM.
   12395  */
   12396 static void
   12397 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12398 {
   12399 	uint32_t reg;
   12400 	int x;
   12401 
   12402 	reg = CSR_READ(sc, WMREG_EECD);
   12403 
   12404 	for (x = nbits; x > 0; x--) {
   12405 		if (bits & (1U << (x - 1)))
   12406 			reg |= EECD_DI;
   12407 		else
   12408 			reg &= ~EECD_DI;
   12409 		CSR_WRITE(sc, WMREG_EECD, reg);
   12410 		CSR_WRITE_FLUSH(sc);
   12411 		delay(2);
   12412 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12413 		CSR_WRITE_FLUSH(sc);
   12414 		delay(2);
   12415 		CSR_WRITE(sc, WMREG_EECD, reg);
   12416 		CSR_WRITE_FLUSH(sc);
   12417 		delay(2);
   12418 	}
   12419 }
   12420 
   12421 /*
   12422  * wm_eeprom_recvbits:
   12423  *
   12424  *	Receive a series of bits from the EEPROM.
   12425  */
   12426 static void
   12427 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12428 {
   12429 	uint32_t reg, val;
   12430 	int x;
   12431 
   12432 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12433 
   12434 	val = 0;
   12435 	for (x = nbits; x > 0; x--) {
   12436 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12437 		CSR_WRITE_FLUSH(sc);
   12438 		delay(2);
   12439 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12440 			val |= (1U << (x - 1));
   12441 		CSR_WRITE(sc, WMREG_EECD, reg);
   12442 		CSR_WRITE_FLUSH(sc);
   12443 		delay(2);
   12444 	}
   12445 	*valp = val;
   12446 }
   12447 
   12448 /* Microwire */
   12449 
   12450 /*
   12451  * wm_nvm_read_uwire:
   12452  *
   12453  *	Read a word from the EEPROM using the MicroWire protocol.
   12454  */
   12455 static int
   12456 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12457 {
   12458 	uint32_t reg, val;
   12459 	int i;
   12460 
   12461 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12462 		device_xname(sc->sc_dev), __func__));
   12463 
   12464 	if (sc->nvm.acquire(sc) != 0)
   12465 		return -1;
   12466 
   12467 	for (i = 0; i < wordcnt; i++) {
   12468 		/* Clear SK and DI. */
   12469 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12470 		CSR_WRITE(sc, WMREG_EECD, reg);
   12471 
   12472 		/*
   12473 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12474 		 * and Xen.
   12475 		 *
		 * We use this workaround only for 82540 because qemu's
		 * e1000 acts as an 82540.
   12478 		 */
   12479 		if (sc->sc_type == WM_T_82540) {
   12480 			reg |= EECD_SK;
   12481 			CSR_WRITE(sc, WMREG_EECD, reg);
   12482 			reg &= ~EECD_SK;
   12483 			CSR_WRITE(sc, WMREG_EECD, reg);
   12484 			CSR_WRITE_FLUSH(sc);
   12485 			delay(2);
   12486 		}
   12487 		/* XXX: end of workaround */
   12488 
   12489 		/* Set CHIP SELECT. */
   12490 		reg |= EECD_CS;
   12491 		CSR_WRITE(sc, WMREG_EECD, reg);
   12492 		CSR_WRITE_FLUSH(sc);
   12493 		delay(2);
   12494 
   12495 		/* Shift in the READ command. */
   12496 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12497 
   12498 		/* Shift in address. */
   12499 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12500 
   12501 		/* Shift out the data. */
   12502 		wm_eeprom_recvbits(sc, &val, 16);
   12503 		data[i] = val & 0xffff;
   12504 
   12505 		/* Clear CHIP SELECT. */
   12506 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12507 		CSR_WRITE(sc, WMREG_EECD, reg);
   12508 		CSR_WRITE_FLUSH(sc);
   12509 		delay(2);
   12510 	}
   12511 
   12512 	sc->nvm.release(sc);
   12513 	return 0;
   12514 }
   12515 
   12516 /* SPI */
   12517 
   12518 /*
   12519  * Set SPI and FLASH related information from the EECD register.
   12520  * For 82541 and 82547, the word size is taken from EEPROM.
   12521  */
   12522 static int
   12523 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12524 {
   12525 	int size;
   12526 	uint32_t reg;
   12527 	uint16_t data;
   12528 
   12529 	reg = CSR_READ(sc, WMREG_EECD);
   12530 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12531 
   12532 	/* Read the size of NVM from EECD by default */
   12533 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12534 	switch (sc->sc_type) {
   12535 	case WM_T_82541:
   12536 	case WM_T_82541_2:
   12537 	case WM_T_82547:
   12538 	case WM_T_82547_2:
   12539 		/* Set dummy value to access EEPROM */
   12540 		sc->sc_nvm_wordsize = 64;
   12541 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12542 			aprint_error_dev(sc->sc_dev,
   12543 			    "%s: failed to read EEPROM size\n", __func__);
   12544 		}
   12545 		reg = data;
   12546 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12547 		if (size == 0)
   12548 			size = 6; /* 64 word size */
   12549 		else
   12550 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12551 		break;
   12552 	case WM_T_80003:
   12553 	case WM_T_82571:
   12554 	case WM_T_82572:
   12555 	case WM_T_82573: /* SPI case */
   12556 	case WM_T_82574: /* SPI case */
   12557 	case WM_T_82583: /* SPI case */
   12558 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12559 		if (size > 14)
   12560 			size = 14;
   12561 		break;
   12562 	case WM_T_82575:
   12563 	case WM_T_82576:
   12564 	case WM_T_82580:
   12565 	case WM_T_I350:
   12566 	case WM_T_I354:
   12567 	case WM_T_I210:
   12568 	case WM_T_I211:
   12569 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12570 		if (size > 15)
   12571 			size = 15;
   12572 		break;
   12573 	default:
   12574 		aprint_error_dev(sc->sc_dev,
   12575 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12576 		return -1;
   12577 		break;
   12578 	}
   12579 
   12580 	sc->sc_nvm_wordsize = 1 << size;
   12581 
   12582 	return 0;
   12583 }
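
/*
 * Worked example (illustrative, and assuming NVM_WORD_SIZE_BASE_SHIFT
 * is 6): an 82571 whose EECD_EE_SIZE_EX field reads 4 gives
 * size = 4 + 6 = 10, so sc_nvm_wordsize = 1 << 10 = 1024 words, i.e.
 * a 2 KB SPI EEPROM.
 */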
   12584 
   12585 /*
   12586  * wm_nvm_ready_spi:
   12587  *
   12588  *	Wait for a SPI EEPROM to be ready for commands.
   12589  */
   12590 static int
   12591 wm_nvm_ready_spi(struct wm_softc *sc)
   12592 {
   12593 	uint32_t val;
   12594 	int usec;
   12595 
   12596 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12597 		device_xname(sc->sc_dev), __func__));
   12598 
   12599 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12600 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12601 		wm_eeprom_recvbits(sc, &val, 8);
   12602 		if ((val & SPI_SR_RDY) == 0)
   12603 			break;
   12604 	}
   12605 	if (usec >= SPI_MAX_RETRIES) {
   12606 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12607 		return -1;
   12608 	}
   12609 	return 0;
   12610 }
   12611 
   12612 /*
   12613  * wm_nvm_read_spi:
   12614  *
 *	Read a word from the EEPROM using the SPI protocol.
   12616  */
   12617 static int
   12618 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12619 {
   12620 	uint32_t reg, val;
   12621 	int i;
   12622 	uint8_t opc;
   12623 	int rv = 0;
   12624 
   12625 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12626 		device_xname(sc->sc_dev), __func__));
   12627 
   12628 	if (sc->nvm.acquire(sc) != 0)
   12629 		return -1;
   12630 
   12631 	/* Clear SK and CS. */
   12632 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12633 	CSR_WRITE(sc, WMREG_EECD, reg);
   12634 	CSR_WRITE_FLUSH(sc);
   12635 	delay(2);
   12636 
   12637 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12638 		goto out;
   12639 
   12640 	/* Toggle CS to flush commands. */
   12641 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12642 	CSR_WRITE_FLUSH(sc);
   12643 	delay(2);
   12644 	CSR_WRITE(sc, WMREG_EECD, reg);
   12645 	CSR_WRITE_FLUSH(sc);
   12646 	delay(2);
   12647 
   12648 	opc = SPI_OPC_READ;
   12649 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12650 		opc |= SPI_OPC_A8;
   12651 
   12652 	wm_eeprom_sendbits(sc, opc, 8);
   12653 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12654 
   12655 	for (i = 0; i < wordcnt; i++) {
   12656 		wm_eeprom_recvbits(sc, &val, 16);
   12657 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12658 	}
   12659 
   12660 	/* Raise CS and clear SK. */
   12661 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12662 	CSR_WRITE(sc, WMREG_EECD, reg);
   12663 	CSR_WRITE_FLUSH(sc);
   12664 	delay(2);
   12665 
   12666 out:
   12667 	sc->nvm.release(sc);
   12668 	return rv;
   12669 }
   12670 
   12671 /* Using with EERD */
   12672 
   12673 static int
   12674 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12675 {
   12676 	uint32_t attempts = 100000;
   12677 	uint32_t i, reg = 0;
   12678 	int32_t done = -1;
   12679 
   12680 	for (i = 0; i < attempts; i++) {
   12681 		reg = CSR_READ(sc, rw);
   12682 
   12683 		if (reg & EERD_DONE) {
   12684 			done = 0;
   12685 			break;
   12686 		}
   12687 		delay(5);
   12688 	}
   12689 
   12690 	return done;
   12691 }
   12692 
   12693 static int
   12694 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12695 {
   12696 	int i, eerd = 0;
   12697 	int rv = 0;
   12698 
   12699 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12700 		device_xname(sc->sc_dev), __func__));
   12701 
   12702 	if (sc->nvm.acquire(sc) != 0)
   12703 		return -1;
   12704 
   12705 	for (i = 0; i < wordcnt; i++) {
   12706 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12707 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12708 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12709 		if (rv != 0) {
   12710 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   12711 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   12712 			break;
   12713 		}
   12714 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12715 	}
   12716 
   12717 	sc->nvm.release(sc);
   12718 	return rv;
   12719 }
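
/*
 * Illustrative sketch (not part of the original driver): reading one
 * word through the EERD path above.  NVM_OFF_EXAMPLE is a hypothetical
 * word offset.
 */
#if 0
static void
wm_nvm_eerd_example(struct wm_softc *sc)
{
	uint16_t word;

	if (wm_nvm_read_eerd(sc, NVM_OFF_EXAMPLE, 1, &word) == 0)
		aprint_debug_dev(sc->sc_dev, "NVM word = 0x%04x\n", word);
}
#endif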
   12720 
   12721 /* Flash */
   12722 
   12723 static int
   12724 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12725 {
   12726 	uint32_t eecd;
   12727 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12728 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12729 	uint32_t nvm_dword = 0;
   12730 	uint8_t sig_byte = 0;
   12731 	int rv;
   12732 
   12733 	switch (sc->sc_type) {
   12734 	case WM_T_PCH_SPT:
   12735 	case WM_T_PCH_CNP:
   12736 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12737 		act_offset = ICH_NVM_SIG_WORD * 2;
   12738 
   12739 		/* Set bank to 0 in case flash read fails. */
   12740 		*bank = 0;
   12741 
   12742 		/* Check bank 0 */
   12743 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12744 		if (rv != 0)
   12745 			return rv;
   12746 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12747 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12748 			*bank = 0;
   12749 			return 0;
   12750 		}
   12751 
   12752 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12756 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12757 			*bank = 1;
   12758 			return 0;
   12759 		}
   12760 		aprint_error_dev(sc->sc_dev,
   12761 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12762 		return -1;
   12763 	case WM_T_ICH8:
   12764 	case WM_T_ICH9:
   12765 		eecd = CSR_READ(sc, WMREG_EECD);
   12766 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12767 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12768 			return 0;
   12769 		}
   12770 		/* FALLTHROUGH */
   12771 	default:
   12772 		/* Default to 0 */
   12773 		*bank = 0;
   12774 
   12775 		/* Check bank 0 */
   12776 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12777 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12778 			*bank = 0;
   12779 			return 0;
   12780 		}
   12781 
   12782 		/* Check bank 1 */
   12783 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12784 		    &sig_byte);
   12785 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12786 			*bank = 1;
   12787 			return 0;
   12788 		}
   12789 	}
   12790 
   12791 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12792 		device_xname(sc->sc_dev)));
   12793 	return -1;
   12794 }
   12795 
   12796 /******************************************************************************
   12797  * This function does initial flash setup so that a new read/write/erase cycle
   12798  * can be started.
   12799  *
   12800  * sc - The pointer to the hw structure
   12801  ****************************************************************************/
   12802 static int32_t
   12803 wm_ich8_cycle_init(struct wm_softc *sc)
   12804 {
   12805 	uint16_t hsfsts;
   12806 	int32_t error = 1;
   12807 	int32_t i     = 0;
   12808 
   12809 	if (sc->sc_type >= WM_T_PCH_SPT)
   12810 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12811 	else
   12812 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12813 
	/* Maybe we should check the Flash Descriptor Valid bit in Hw status */
   12815 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12816 		return error;
   12817 
	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   12820 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12821 
   12822 	if (sc->sc_type >= WM_T_PCH_SPT)
   12823 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12824 	else
   12825 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12826 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which could then be used to tell whether a cycle
	 * is in progress or has completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit so that accesses by two threads are
	 * serialized, or some way to keep two threads from starting a
	 * cycle at the same time.
	 */
   12837 
   12838 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12839 		/*
   12840 		 * There is no cycle running at present, so we can start a
   12841 		 * cycle
   12842 		 */
   12843 
   12844 		/* Begin by setting Flash Cycle Done. */
   12845 		hsfsts |= HSFSTS_DONE;
   12846 		if (sc->sc_type >= WM_T_PCH_SPT)
   12847 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12848 			    hsfsts & 0xffffUL);
   12849 		else
   12850 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12851 		error = 0;
   12852 	} else {
   12853 		/*
   12854 		 * Otherwise poll for sometime so the current cycle has a
   12855 		 * chance to end before giving up.
   12856 		 */
   12857 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12858 			if (sc->sc_type >= WM_T_PCH_SPT)
   12859 				hsfsts = ICH8_FLASH_READ32(sc,
   12860 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12861 			else
   12862 				hsfsts = ICH8_FLASH_READ16(sc,
   12863 				    ICH_FLASH_HSFSTS);
   12864 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12865 				error = 0;
   12866 				break;
   12867 			}
   12868 			delay(1);
   12869 		}
   12870 		if (error == 0) {
   12871 			/*
			 * The previous cycle ended within the timeout;
			 * now set the Flash Cycle Done.
   12874 			 */
   12875 			hsfsts |= HSFSTS_DONE;
   12876 			if (sc->sc_type >= WM_T_PCH_SPT)
   12877 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12878 				    hsfsts & 0xffffUL);
   12879 			else
   12880 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12881 				    hsfsts);
   12882 		}
   12883 	}
   12884 	return error;
   12885 }
   12886 
   12887 /******************************************************************************
   12888  * This function starts a flash cycle and waits for its completion
   12889  *
 * sc - The pointer to the wm_softc structure
   12891  ****************************************************************************/
   12892 static int32_t
   12893 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12894 {
   12895 	uint16_t hsflctl;
   12896 	uint16_t hsfsts;
   12897 	int32_t error = 1;
   12898 	uint32_t i = 0;
   12899 
   12900 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12901 	if (sc->sc_type >= WM_T_PCH_SPT)
   12902 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12903 	else
   12904 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12905 	hsflctl |= HSFCTL_GO;
   12906 	if (sc->sc_type >= WM_T_PCH_SPT)
   12907 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12908 		    (uint32_t)hsflctl << 16);
   12909 	else
   12910 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12911 
	/* Wait until the FDONE bit is set to 1 */
   12913 	do {
   12914 		if (sc->sc_type >= WM_T_PCH_SPT)
   12915 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12916 			    & 0xffffUL;
   12917 		else
   12918 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12919 		if (hsfsts & HSFSTS_DONE)
   12920 			break;
   12921 		delay(1);
   12922 		i++;
   12923 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12925 		error = 0;
   12926 
   12927 	return error;
   12928 }
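
/*
 * A minimal usage sketch (not a new code path; the real caller is
 * wm_read_ich8_data() below) showing how the two helpers above combine
 * into one flash access.  Error handling is elided:
 *
 *	if (wm_ich8_cycle_init(sc) == 0) {
 *		// ... program the byte count and cycle type in HSFCTL and
 *		// the target address in ICH_FLASH_FADDR, then kick off the
 *		// cycle and poll HSFSTS_DONE:
 *		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
 *	}
 */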
   12929 
   12930 /******************************************************************************
   12931  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12932  *
 * sc - The pointer to the wm_softc structure
 * index - The index of the byte or word to read.
 * size - Size of data to read: 1=byte, 2=word, 4=dword
   12936  * data - Pointer to the word to store the value read.
   12937  *****************************************************************************/
   12938 static int32_t
   12939 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12940     uint32_t size, uint32_t *data)
   12941 {
   12942 	uint16_t hsfsts;
   12943 	uint16_t hsflctl;
   12944 	uint32_t flash_linear_address;
   12945 	uint32_t flash_data = 0;
   12946 	int32_t error = 1;
   12947 	int32_t count = 0;
   12948 
	if ((size != 1 && size != 2 && size != 4) || data == NULL ||
   12950 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12951 		return error;
   12952 
   12953 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12954 	    sc->sc_ich8_flash_base;
   12955 
   12956 	do {
   12957 		delay(1);
   12958 		/* Steps */
   12959 		error = wm_ich8_cycle_init(sc);
   12960 		if (error)
   12961 			break;
   12962 
   12963 		if (sc->sc_type >= WM_T_PCH_SPT)
   12964 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12965 			    >> 16;
   12966 		else
   12967 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* Byte count field encodes size - 1: 0 = 1 byte, 1 = 2, ... */
   12969 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12970 		    & HSFCTL_BCOUNT_MASK;
   12971 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12972 		if (sc->sc_type >= WM_T_PCH_SPT) {
			/*
			 * In SPT, this register is in the LAN memory space,
			 * not flash.  Therefore, only 32 bit access is
			 * supported.
			 */
   12977 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12978 			    (uint32_t)hsflctl << 16);
   12979 		} else
   12980 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12981 
		/*
		 * Write the last 24 bits of the index into the Flash Linear
		 * Address field of the Flash Address register.
		 */
		/* TODO: maybe check the index against the size of the flash */
   12987 
   12988 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12989 
   12990 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12991 
		/*
		 * If FCERR is set, clear it and retry the whole sequence up
		 * to a few more times; otherwise read the Flash Data0
		 * register (the data comes back least significant byte
		 * first).
		 */
   12998 		if (error == 0) {
   12999 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13000 			if (size == 1)
   13001 				*data = (uint8_t)(flash_data & 0x000000FF);
   13002 			else if (size == 2)
   13003 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13004 			else if (size == 4)
   13005 				*data = (uint32_t)flash_data;
   13006 			break;
   13007 		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try, up
			 * to ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
   13014 			if (sc->sc_type >= WM_T_PCH_SPT)
   13015 				hsfsts = ICH8_FLASH_READ32(sc,
   13016 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13017 			else
   13018 				hsfsts = ICH8_FLASH_READ16(sc,
   13019 				    ICH_FLASH_HSFSTS);
   13020 
   13021 			if (hsfsts & HSFSTS_ERR) {
   13022 				/* Repeat for some time before giving up. */
   13023 				continue;
   13024 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13025 				break;
   13026 		}
   13027 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13028 
   13029 	return error;
   13030 }
   13031 
   13032 /******************************************************************************
   13033  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13034  *
 * sc - pointer to the wm_softc structure
   13036  * index - The index of the byte to read.
   13037  * data - Pointer to a byte to store the value read.
   13038  *****************************************************************************/
   13039 static int32_t
   13040 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13041 {
   13042 	int32_t status;
   13043 	uint32_t word = 0;
   13044 
   13045 	status = wm_read_ich8_data(sc, index, 1, &word);
   13046 	if (status == 0)
   13047 		*data = (uint8_t)word;
   13048 	else
   13049 		*data = 0;
   13050 
   13051 	return status;
   13052 }
   13053 
   13054 /******************************************************************************
   13055  * Reads a word from the NVM using the ICH8 flash access registers.
   13056  *
 * sc - pointer to the wm_softc structure
   13058  * index - The starting byte index of the word to read.
   13059  * data - Pointer to a word to store the value read.
   13060  *****************************************************************************/
   13061 static int32_t
   13062 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13063 {
   13064 	int32_t status;
   13065 	uint32_t word = 0;
   13066 
   13067 	status = wm_read_ich8_data(sc, index, 2, &word);
   13068 	if (status == 0)
   13069 		*data = (uint16_t)word;
   13070 	else
   13071 		*data = 0;
   13072 
   13073 	return status;
   13074 }
   13075 
   13076 /******************************************************************************
   13077  * Reads a dword from the NVM using the ICH8 flash access registers.
   13078  *
 * sc - pointer to the wm_softc structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   13082  *****************************************************************************/
   13083 static int32_t
   13084 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13085 {
   13086 	int32_t status;
   13087 
   13088 	status = wm_read_ich8_data(sc, index, 4, data);
   13089 	return status;
   13090 }
   13091 
   13092 /******************************************************************************
   13093  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13094  * register.
   13095  *
   13096  * sc - Struct containing variables accessed by shared code
   13097  * offset - offset of word in the EEPROM to read
   13098  * data - word read from the EEPROM
   13099  * words - number of words to read
   13100  *****************************************************************************/
   13101 static int
   13102 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13103 {
   13104 	int32_t	 rv = 0;
   13105 	uint32_t flash_bank = 0;
   13106 	uint32_t act_offset = 0;
   13107 	uint32_t bank_offset = 0;
   13108 	uint16_t word = 0;
   13109 	uint16_t i = 0;
   13110 
   13111 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13112 		device_xname(sc->sc_dev), __func__));
   13113 
   13114 	if (sc->nvm.acquire(sc) != 0)
   13115 		return -1;
   13116 
   13117 	/*
   13118 	 * We need to know which is the valid flash bank.  In the event
   13119 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13120 	 * managing flash_bank. So it cannot be trusted and needs
   13121 	 * to be updated with each read.
   13122 	 */
   13123 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13124 	if (rv) {
   13125 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13126 			device_xname(sc->sc_dev)));
   13127 		flash_bank = 0;
   13128 	}
   13129 
	/*
	 * Adjust the offset if we're on bank 1; the bank size is counted in
	 * words, hence the * 2 to get a byte offset.
	 */
   13134 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13135 
   13136 	for (i = 0; i < words; i++) {
   13137 		/* The NVM part needs a byte offset, hence * 2 */
   13138 		act_offset = bank_offset + ((offset + i) * 2);
   13139 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13140 		if (rv) {
   13141 			aprint_error_dev(sc->sc_dev,
   13142 			    "%s: failed to read NVM\n", __func__);
   13143 			break;
   13144 		}
   13145 		data[i] = word;
   13146 	}
   13147 
   13148 	sc->nvm.release(sc);
   13149 	return rv;
   13150 }
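
/*
 * Worked example of the offset arithmetic above (the numbers are
 * illustrative, not from any particular chip): with flash_bank = 1 and
 * sc_ich8_flash_bank_size = 0x800 words, the second bank starts at byte
 * offset 1 * (0x800 * 2) = 0x1000, so NVM word 3 is read from flash byte
 * 0x1000 + (3 * 2) = 0x1006.
 */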
   13151 
   13152 /******************************************************************************
   13153  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13154  * register.
   13155  *
   13156  * sc - Struct containing variables accessed by shared code
   13157  * offset - offset of word in the EEPROM to read
   13158  * data - word read from the EEPROM
   13159  * words - number of words to read
   13160  *****************************************************************************/
   13161 static int
   13162 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13163 {
   13164 	int32_t	 rv = 0;
   13165 	uint32_t flash_bank = 0;
   13166 	uint32_t act_offset = 0;
   13167 	uint32_t bank_offset = 0;
   13168 	uint32_t dword = 0;
   13169 	uint16_t i = 0;
   13170 
   13171 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13172 		device_xname(sc->sc_dev), __func__));
   13173 
   13174 	if (sc->nvm.acquire(sc) != 0)
   13175 		return -1;
   13176 
   13177 	/*
   13178 	 * We need to know which is the valid flash bank.  In the event
   13179 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13180 	 * managing flash_bank. So it cannot be trusted and needs
   13181 	 * to be updated with each read.
   13182 	 */
   13183 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13184 	if (rv) {
   13185 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13186 			device_xname(sc->sc_dev)));
   13187 		flash_bank = 0;
   13188 	}
   13189 
	/*
	 * Adjust the offset if we're on bank 1; the bank size is counted in
	 * words, hence the * 2 to get a byte offset.
	 */
   13194 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13195 
   13196 	for (i = 0; i < words; i++) {
   13197 		/* The NVM part needs a byte offset, hence * 2 */
   13198 		act_offset = bank_offset + ((offset + i) * 2);
   13199 		/* but we must read dword aligned, so mask ... */
   13200 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13201 		if (rv) {
   13202 			aprint_error_dev(sc->sc_dev,
   13203 			    "%s: failed to read NVM\n", __func__);
   13204 			break;
   13205 		}
   13206 		/* ... and pick out low or high word */
   13207 		if ((act_offset & 0x2) == 0)
   13208 			data[i] = (uint16_t)(dword & 0xFFFF);
   13209 		else
   13210 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13211 	}
   13212 
   13213 	sc->nvm.release(sc);
   13214 	return rv;
   13215 }
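
/*
 * Worked example of the dword alignment above: for NVM word offset 3 on
 * bank 0, act_offset = 6.  The dword is read from 6 & ~0x3 = 4, and since
 * (6 & 0x2) != 0 the requested word is the high half:
 *
 *	data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
 */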
   13216 
   13217 /* iNVM */
   13218 
   13219 static int
   13220 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13221 {
	int32_t	 rv = -1;	/* "Not found" unless the word is found below */
   13223 	uint32_t invm_dword;
   13224 	uint16_t i;
   13225 	uint8_t record_type, word_address;
   13226 
   13227 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13228 		device_xname(sc->sc_dev), __func__));
   13229 
   13230 	for (i = 0; i < INVM_SIZE; i++) {
   13231 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13232 		/* Get record type */
   13233 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13234 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13235 			break;
   13236 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13237 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13238 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13239 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13240 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13241 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13242 			if (word_address == address) {
   13243 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13244 				rv = 0;
   13245 				break;
   13246 			}
   13247 		}
   13248 	}
   13249 
   13250 	return rv;
   13251 }
   13252 
   13253 static int
   13254 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13255 {
   13256 	int rv = 0;
   13257 	int i;
   13258 
   13259 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13260 		device_xname(sc->sc_dev), __func__));
   13261 
   13262 	if (sc->nvm.acquire(sc) != 0)
   13263 		return -1;
   13264 
   13265 	for (i = 0; i < words; i++) {
   13266 		switch (offset + i) {
   13267 		case NVM_OFF_MACADDR:
   13268 		case NVM_OFF_MACADDR1:
   13269 		case NVM_OFF_MACADDR2:
   13270 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13271 			if (rv != 0) {
   13272 				data[i] = 0xffff;
   13273 				rv = -1;
   13274 			}
   13275 			break;
   13276 		case NVM_OFF_CFG2:
   13277 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13278 			if (rv != 0) {
   13279 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13280 				rv = 0;
   13281 			}
   13282 			break;
   13283 		case NVM_OFF_CFG4:
   13284 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13285 			if (rv != 0) {
   13286 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13287 				rv = 0;
   13288 			}
   13289 			break;
   13290 		case NVM_OFF_LED_1_CFG:
   13291 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13292 			if (rv != 0) {
   13293 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13294 				rv = 0;
   13295 			}
   13296 			break;
   13297 		case NVM_OFF_LED_0_2_CFG:
   13298 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13299 			if (rv != 0) {
   13300 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13301 				rv = 0;
   13302 			}
   13303 			break;
   13304 		case NVM_OFF_ID_LED_SETTINGS:
   13305 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13306 			if (rv != 0) {
   13307 				*data = ID_LED_RESERVED_FFFF;
   13308 				rv = 0;
   13309 			}
   13310 			break;
   13311 		default:
   13312 			DPRINTF(WM_DEBUG_NVM,
   13313 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13314 			*data = NVM_RESERVED_WORD;
   13315 			break;
   13316 		}
   13317 	}
   13318 
   13319 	sc->nvm.release(sc);
   13320 	return rv;
   13321 }
   13322 
/* Locking, NVM type detection, checksum validation, version info and read */
   13324 
   13325 static int
   13326 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13327 {
   13328 	uint32_t eecd = 0;
   13329 
   13330 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13331 	    || sc->sc_type == WM_T_82583) {
   13332 		eecd = CSR_READ(sc, WMREG_EECD);
   13333 
   13334 		/* Isolate bits 15 & 16 */
   13335 		eecd = ((eecd >> 15) & 0x03);
   13336 
   13337 		/* If both bits are set, device is Flash type */
   13338 		if (eecd == 0x03)
   13339 			return 0;
   13340 	}
   13341 	return 1;
   13342 }
   13343 
   13344 static int
   13345 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13346 {
   13347 	uint32_t eec;
   13348 
   13349 	eec = CSR_READ(sc, WMREG_EEC);
   13350 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13351 		return 1;
   13352 
   13353 	return 0;
   13354 }
   13355 
/*
 * wm_nvm_validate_checksum
 *
 * The checksum is valid when the 16-bit sum of the first 64 words equals
 * NVM_CHECKSUM.
 */
   13361 static int
   13362 wm_nvm_validate_checksum(struct wm_softc *sc)
   13363 {
   13364 	uint16_t checksum;
   13365 	uint16_t eeprom_data;
   13366 #ifdef WM_DEBUG
   13367 	uint16_t csum_wordaddr, valid_checksum;
   13368 #endif
   13369 	int i;
   13370 
   13371 	checksum = 0;
   13372 
   13373 	/* Don't check for I211 */
   13374 	if (sc->sc_type == WM_T_I211)
   13375 		return 0;
   13376 
   13377 #ifdef WM_DEBUG
   13378 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13379 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13380 		csum_wordaddr = NVM_OFF_COMPAT;
   13381 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13382 	} else {
   13383 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13384 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13385 	}
   13386 
   13387 	/* Dump EEPROM image for debug */
   13388 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13389 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13390 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13391 		/* XXX PCH_SPT? */
   13392 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13393 		if ((eeprom_data & valid_checksum) == 0)
   13394 			DPRINTF(WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   13396 				device_xname(sc->sc_dev), eeprom_data,
   13397 				    valid_checksum));
   13398 	}
   13399 
   13400 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13401 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13402 		for (i = 0; i < NVM_SIZE; i++) {
   13403 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13404 				printf("XXXX ");
   13405 			else
   13406 				printf("%04hx ", eeprom_data);
   13407 			if (i % 8 == 7)
   13408 				printf("\n");
   13409 		}
   13410 	}
   13411 
   13412 #endif /* WM_DEBUG */
   13413 
   13414 	for (i = 0; i < NVM_SIZE; i++) {
   13415 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13416 			return 1;
   13417 		checksum += eeprom_data;
   13418 	}
   13419 
	/* A mismatch is only reported (under WM_DEBUG); it's not fatal. */
	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13421 #ifdef WM_DEBUG
   13422 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13423 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13424 #endif
   13425 	}
   13426 
   13427 	return 0;
   13428 }
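
/*
 * A worked example of the checksum rule, assuming the usual Intel value
 * NVM_CHECKSUM == 0xBABA: the image is generated so that the checksum
 * word (conventionally the last of the NVM_SIZE words) makes the 16-bit
 * sum of all words come out right, i.e. roughly
 *
 *	word[NVM_SIZE - 1] =
 *	    (uint16_t)(NVM_CHECKSUM - sum(word[0] .. word[NVM_SIZE - 2]));
 *
 * so the loop above only has to re-add everything and compare.
 */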
   13429 
   13430 static void
   13431 wm_nvm_version_invm(struct wm_softc *sc)
   13432 {
   13433 	uint32_t dword;
   13434 
	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes.  Perhaps it's not perfect, though...
	 *
	 * Example:
	 *
	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
	 */
   13444 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13445 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13446 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13447 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13448 }
   13449 
   13450 static void
   13451 wm_nvm_version(struct wm_softc *sc)
   13452 {
   13453 	uint16_t major, minor, build, patch;
   13454 	uint16_t uid0, uid1;
   13455 	uint16_t nvm_data;
   13456 	uint16_t off;
   13457 	bool check_version = false;
   13458 	bool check_optionrom = false;
   13459 	bool have_build = false;
   13460 	bool have_uid = true;
   13461 
   13462 	/*
   13463 	 * Version format:
   13464 	 *
   13465 	 * XYYZ
   13466 	 * X0YZ
   13467 	 * X0YY
   13468 	 *
   13469 	 * Example:
   13470 	 *
   13471 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13472 	 *	82571	0x50a6	5.10.6?
   13473 	 *	82572	0x506a	5.6.10?
   13474 	 *	82572EI	0x5069	5.6.9?
   13475 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13476 	 *		0x2013	2.1.3?
   13477 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13478 	 */
   13479 
	/*
	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
	 */
   13485 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13486 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13487 		have_uid = false;
   13488 
   13489 	switch (sc->sc_type) {
   13490 	case WM_T_82571:
   13491 	case WM_T_82572:
   13492 	case WM_T_82574:
   13493 	case WM_T_82583:
   13494 		check_version = true;
   13495 		check_optionrom = true;
   13496 		have_build = true;
   13497 		break;
   13498 	case WM_T_82575:
   13499 	case WM_T_82576:
   13500 	case WM_T_82580:
   13501 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13502 			check_version = true;
   13503 		break;
   13504 	case WM_T_I211:
   13505 		wm_nvm_version_invm(sc);
   13506 		have_uid = false;
   13507 		goto printver;
   13508 	case WM_T_I210:
   13509 		if (!wm_nvm_flash_presence_i210(sc)) {
   13510 			wm_nvm_version_invm(sc);
   13511 			have_uid = false;
   13512 			goto printver;
   13513 		}
   13514 		/* FALLTHROUGH */
   13515 	case WM_T_I350:
   13516 	case WM_T_I354:
   13517 		check_version = true;
   13518 		check_optionrom = true;
   13519 		break;
   13520 	default:
   13521 		return;
   13522 	}
   13523 	if (check_version
   13524 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13525 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13526 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13527 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13528 			build = nvm_data & NVM_BUILD_MASK;
   13529 			have_build = true;
   13530 		} else
   13531 			minor = nvm_data & 0x00ff;
   13532 
		/* Convert the BCD-coded minor to decimal */
   13534 		minor = (minor / 16) * 10 + (minor % 16);
   13535 		sc->sc_nvm_ver_major = major;
   13536 		sc->sc_nvm_ver_minor = minor;
   13537 
   13538 printver:
   13539 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13540 		    sc->sc_nvm_ver_minor);
   13541 		if (have_build) {
   13542 			sc->sc_nvm_ver_build = build;
   13543 			aprint_verbose(".%d", build);
   13544 		}
   13545 	}
   13546 
	/* Assume the Option ROM area is above NVM_SIZE */
   13548 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13549 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13550 		/* Option ROM Version */
   13551 		if ((off != 0x0000) && (off != 0xffff)) {
   13552 			int rv;
   13553 
   13554 			off += NVM_COMBO_VER_OFF;
   13555 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13556 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13557 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13558 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13559 				/* 16bits */
   13560 				major = uid0 >> 8;
   13561 				build = (uid0 << 8) | (uid1 >> 8);
   13562 				patch = uid1 & 0x00ff;
   13563 				aprint_verbose(", option ROM Version %d.%d.%d",
   13564 				    major, build, patch);
   13565 			}
   13566 		}
   13567 	}
   13568 
   13569 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13570 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13571 }
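
/*
 * Worked example of the decoding above: nvm_data == 0x50a2 yields
 * major = 0x5, minor = 0x0a and build = 0x2.  The BCD conversion turns
 * minor 0x0a into (0x0a / 16) * 10 + (0x0a % 16) = 10, giving version
 * 5.10.2 as in the table further up.
 */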
   13572 
   13573 /*
   13574  * wm_nvm_read:
   13575  *
   13576  *	Read data from the serial EEPROM.
   13577  */
   13578 static int
   13579 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13580 {
   13581 	int rv;
   13582 
   13583 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13584 		device_xname(sc->sc_dev), __func__));
   13585 
   13586 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13587 		return -1;
   13588 
   13589 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13590 
   13591 	return rv;
   13592 }
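
/*
 * Typical usage sketch (hypothetical caller): read the three words of
 * the primary MAC address through whatever backend sc->nvm.read points
 * at:
 *
 *	uint16_t ea[3];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, ea) != 0)
 *		return;	// NVM marked invalid, or the backend read failed
 */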
   13593 
/*
 * Hardware semaphores.
 * Very complex...
 */
   13598 
   13599 static int
   13600 wm_get_null(struct wm_softc *sc)
   13601 {
   13602 
   13603 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13604 		device_xname(sc->sc_dev), __func__));
   13605 	return 0;
   13606 }
   13607 
   13608 static void
   13609 wm_put_null(struct wm_softc *sc)
   13610 {
   13611 
   13612 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13613 		device_xname(sc->sc_dev), __func__));
   13614 	return;
   13615 }
   13616 
   13617 static int
   13618 wm_get_eecd(struct wm_softc *sc)
   13619 {
   13620 	uint32_t reg;
   13621 	int x;
   13622 
   13623 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13624 		device_xname(sc->sc_dev), __func__));
   13625 
   13626 	reg = CSR_READ(sc, WMREG_EECD);
   13627 
   13628 	/* Request EEPROM access. */
   13629 	reg |= EECD_EE_REQ;
   13630 	CSR_WRITE(sc, WMREG_EECD, reg);
   13631 
   13632 	/* ..and wait for it to be granted. */
   13633 	for (x = 0; x < 1000; x++) {
   13634 		reg = CSR_READ(sc, WMREG_EECD);
   13635 		if (reg & EECD_EE_GNT)
   13636 			break;
   13637 		delay(5);
   13638 	}
   13639 	if ((reg & EECD_EE_GNT) == 0) {
   13640 		aprint_error_dev(sc->sc_dev,
   13641 		    "could not acquire EEPROM GNT\n");
   13642 		reg &= ~EECD_EE_REQ;
   13643 		CSR_WRITE(sc, WMREG_EECD, reg);
   13644 		return -1;
   13645 	}
   13646 
   13647 	return 0;
   13648 }
   13649 
   13650 static void
   13651 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13652 {
   13653 
   13654 	*eecd |= EECD_SK;
   13655 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13656 	CSR_WRITE_FLUSH(sc);
   13657 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13658 		delay(1);
   13659 	else
   13660 		delay(50);
   13661 }
   13662 
   13663 static void
   13664 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13665 {
   13666 
   13667 	*eecd &= ~EECD_SK;
   13668 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13669 	CSR_WRITE_FLUSH(sc);
   13670 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13671 		delay(1);
   13672 	else
   13673 		delay(50);
   13674 }
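
/*
 * These two helpers implement one EEPROM serial clock period.  A
 * hypothetical bit-bang of a single data bit would look like the sketch
 * below (the real shift-out/shift-in loops live elsewhere in this file);
 * the EEPROM samples DI on the rising edge of SK:
 *
 *	reg = bit ? (reg | EECD_DI) : (reg & ~EECD_DI);
 *	CSR_WRITE(sc, WMREG_EECD, reg);
 *	CSR_WRITE_FLUSH(sc);
 *	wm_nvm_eec_clock_raise(sc, &reg);
 *	wm_nvm_eec_clock_lower(sc, &reg);
 */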
   13675 
   13676 static void
   13677 wm_put_eecd(struct wm_softc *sc)
   13678 {
   13679 	uint32_t reg;
   13680 
   13681 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13682 		device_xname(sc->sc_dev), __func__));
   13683 
   13684 	/* Stop nvm */
   13685 	reg = CSR_READ(sc, WMREG_EECD);
   13686 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13687 		/* Pull CS high */
   13688 		reg |= EECD_CS;
   13689 		wm_nvm_eec_clock_lower(sc, &reg);
   13690 	} else {
   13691 		/* CS on Microwire is active-high */
   13692 		reg &= ~(EECD_CS | EECD_DI);
   13693 		CSR_WRITE(sc, WMREG_EECD, reg);
   13694 		wm_nvm_eec_clock_raise(sc, &reg);
   13695 		wm_nvm_eec_clock_lower(sc, &reg);
   13696 	}
   13697 
   13698 	reg = CSR_READ(sc, WMREG_EECD);
   13699 	reg &= ~EECD_EE_REQ;
   13700 	CSR_WRITE(sc, WMREG_EECD, reg);
   13701 
   13702 	return;
   13703 }
   13704 
   13705 /*
   13706  * Get hardware semaphore.
   13707  * Same as e1000_get_hw_semaphore_generic()
   13708  */
   13709 static int
   13710 wm_get_swsm_semaphore(struct wm_softc *sc)
   13711 {
   13712 	int32_t timeout;
   13713 	uint32_t swsm;
   13714 
   13715 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13716 		device_xname(sc->sc_dev), __func__));
   13717 	KASSERT(sc->sc_nvm_wordsize > 0);
   13718 
   13719 retry:
   13720 	/* Get the SW semaphore. */
   13721 	timeout = sc->sc_nvm_wordsize + 1;
   13722 	while (timeout) {
   13723 		swsm = CSR_READ(sc, WMREG_SWSM);
   13724 
   13725 		if ((swsm & SWSM_SMBI) == 0)
   13726 			break;
   13727 
   13728 		delay(50);
   13729 		timeout--;
   13730 	}
   13731 
   13732 	if (timeout == 0) {
   13733 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13734 			/*
   13735 			 * In rare circumstances, the SW semaphore may already
   13736 			 * be held unintentionally. Clear the semaphore once
   13737 			 * before giving up.
   13738 			 */
   13739 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13740 			wm_put_swsm_semaphore(sc);
   13741 			goto retry;
   13742 		}
   13743 		aprint_error_dev(sc->sc_dev,
   13744 		    "could not acquire SWSM SMBI\n");
   13745 		return 1;
   13746 	}
   13747 
   13748 	/* Get the FW semaphore. */
   13749 	timeout = sc->sc_nvm_wordsize + 1;
   13750 	while (timeout) {
   13751 		swsm = CSR_READ(sc, WMREG_SWSM);
   13752 		swsm |= SWSM_SWESMBI;
   13753 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13754 		/* If we managed to set the bit we got the semaphore. */
   13755 		swsm = CSR_READ(sc, WMREG_SWSM);
   13756 		if (swsm & SWSM_SWESMBI)
   13757 			break;
   13758 
   13759 		delay(50);
   13760 		timeout--;
   13761 	}
   13762 
   13763 	if (timeout == 0) {
   13764 		aprint_error_dev(sc->sc_dev,
   13765 		    "could not acquire SWSM SWESMBI\n");
   13766 		/* Release semaphores */
   13767 		wm_put_swsm_semaphore(sc);
   13768 		return 1;
   13769 	}
   13770 	return 0;
   13771 }
   13772 
   13773 /*
   13774  * Put hardware semaphore.
   13775  * Same as e1000_put_hw_semaphore_generic()
   13776  */
   13777 static void
   13778 wm_put_swsm_semaphore(struct wm_softc *sc)
   13779 {
   13780 	uint32_t swsm;
   13781 
   13782 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13783 		device_xname(sc->sc_dev), __func__));
   13784 
   13785 	swsm = CSR_READ(sc, WMREG_SWSM);
   13786 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13787 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13788 }
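
/*
 * Usage sketch (hypothetical caller): the SWSM semaphore always brackets
 * the protected access:
 *
 *	if (wm_get_swsm_semaphore(sc) != 0)
 *		return -1;
 *	// ... touch the resource shared with firmware ...
 *	wm_put_swsm_semaphore(sc);
 */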
   13789 
   13790 /*
   13791  * Get SW/FW semaphore.
   13792  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13793  */
   13794 static int
   13795 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13796 {
   13797 	uint32_t swfw_sync;
   13798 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13799 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13800 	int timeout;
   13801 
   13802 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13803 		device_xname(sc->sc_dev), __func__));
   13804 
   13805 	if (sc->sc_type == WM_T_80003)
   13806 		timeout = 50;
   13807 	else
   13808 		timeout = 200;
   13809 
   13810 	while (timeout) {
   13811 		if (wm_get_swsm_semaphore(sc)) {
   13812 			aprint_error_dev(sc->sc_dev,
   13813 			    "%s: failed to get semaphore\n",
   13814 			    __func__);
   13815 			return 1;
   13816 		}
   13817 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13818 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13819 			swfw_sync |= swmask;
   13820 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13821 			wm_put_swsm_semaphore(sc);
   13822 			return 0;
   13823 		}
   13824 		wm_put_swsm_semaphore(sc);
   13825 		delay(5000);
   13826 		timeout--;
   13827 	}
   13828 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13829 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13830 	return 1;
   13831 }
   13832 
   13833 static void
   13834 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13835 {
   13836 	uint32_t swfw_sync;
   13837 
   13838 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13839 		device_xname(sc->sc_dev), __func__));
   13840 
   13841 	while (wm_get_swsm_semaphore(sc) != 0)
   13842 		continue;
   13843 
   13844 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13845 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13846 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13847 
   13848 	wm_put_swsm_semaphore(sc);
   13849 }
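
/*
 * Usage sketch for the SW/FW sync bits: a caller picks the mask for the
 * resource it needs (e.g. SWFW_EEP_SM for the NVM, as wm_get_nvm_80003()
 * below does) and brackets the access:
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) != 0)
 *		return -1;
 *	// ... access the NVM ...
 *	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 */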
   13850 
   13851 static int
   13852 wm_get_nvm_80003(struct wm_softc *sc)
   13853 {
   13854 	int rv;
   13855 
   13856 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13857 		device_xname(sc->sc_dev), __func__));
   13858 
   13859 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13860 		aprint_error_dev(sc->sc_dev,
   13861 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   13862 		return rv;
   13863 	}
   13864 
   13865 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13866 	    && (rv = wm_get_eecd(sc)) != 0) {
   13867 		aprint_error_dev(sc->sc_dev,
   13868 		    "%s: failed to get semaphore(EECD)\n", __func__);
   13869 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13870 		return rv;
   13871 	}
   13872 
   13873 	return 0;
   13874 }
   13875 
   13876 static void
   13877 wm_put_nvm_80003(struct wm_softc *sc)
   13878 {
   13879 
   13880 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13881 		device_xname(sc->sc_dev), __func__));
   13882 
   13883 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13884 		wm_put_eecd(sc);
   13885 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13886 }
   13887 
   13888 static int
   13889 wm_get_nvm_82571(struct wm_softc *sc)
   13890 {
   13891 	int rv;
   13892 
   13893 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13894 		device_xname(sc->sc_dev), __func__));
   13895 
   13896 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13897 		return rv;
   13898 
   13899 	switch (sc->sc_type) {
   13900 	case WM_T_82573:
   13901 		break;
   13902 	default:
   13903 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13904 			rv = wm_get_eecd(sc);
   13905 		break;
   13906 	}
   13907 
   13908 	if (rv != 0) {
   13909 		aprint_error_dev(sc->sc_dev,
   13910 		    "%s: failed to get semaphore\n",
   13911 		    __func__);
   13912 		wm_put_swsm_semaphore(sc);
   13913 	}
   13914 
   13915 	return rv;
   13916 }
   13917 
   13918 static void
   13919 wm_put_nvm_82571(struct wm_softc *sc)
   13920 {
   13921 
   13922 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13923 		device_xname(sc->sc_dev), __func__));
   13924 
   13925 	switch (sc->sc_type) {
   13926 	case WM_T_82573:
   13927 		break;
   13928 	default:
   13929 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13930 			wm_put_eecd(sc);
   13931 		break;
   13932 	}
   13933 
   13934 	wm_put_swsm_semaphore(sc);
   13935 }
   13936 
   13937 static int
   13938 wm_get_phy_82575(struct wm_softc *sc)
   13939 {
   13940 
   13941 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13942 		device_xname(sc->sc_dev), __func__));
   13943 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13944 }
   13945 
   13946 static void
   13947 wm_put_phy_82575(struct wm_softc *sc)
   13948 {
   13949 
   13950 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13951 		device_xname(sc->sc_dev), __func__));
   13952 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13953 }
   13954 
   13955 static int
   13956 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13957 {
   13958 	uint32_t ext_ctrl;
	int timeout;
   13960 
   13961 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13962 		device_xname(sc->sc_dev), __func__));
   13963 
   13964 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13965 	for (timeout = 0; timeout < 200; timeout++) {
   13966 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13967 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13968 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13969 
   13970 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13971 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13972 			return 0;
   13973 		delay(5000);
   13974 	}
   13975 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13976 	    device_xname(sc->sc_dev), ext_ctrl);
   13977 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13978 	return 1;
   13979 }
   13980 
   13981 static void
   13982 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13983 {
   13984 	uint32_t ext_ctrl;
   13985 
   13986 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13987 		device_xname(sc->sc_dev), __func__));
   13988 
   13989 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13990 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13991 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13992 
   13993 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13994 }
   13995 
   13996 static int
   13997 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13998 {
   13999 	uint32_t ext_ctrl;
   14000 	int timeout;
   14001 
   14002 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14003 		device_xname(sc->sc_dev), __func__));
   14004 	mutex_enter(sc->sc_ich_phymtx);
   14005 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14006 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14007 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14008 			break;
   14009 		delay(1000);
   14010 	}
   14011 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14012 		printf("%s: SW has already locked the resource\n",
   14013 		    device_xname(sc->sc_dev));
   14014 		goto out;
   14015 	}
   14016 
   14017 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14018 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14019 	for (timeout = 0; timeout < 1000; timeout++) {
   14020 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14021 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14022 			break;
   14023 		delay(1000);
   14024 	}
   14025 	if (timeout >= 1000) {
   14026 		printf("%s: failed to acquire semaphore\n",
   14027 		    device_xname(sc->sc_dev));
   14028 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14029 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14030 		goto out;
   14031 	}
   14032 	return 0;
   14033 
   14034 out:
   14035 	mutex_exit(sc->sc_ich_phymtx);
   14036 	return 1;
   14037 }
   14038 
   14039 static void
   14040 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14041 {
   14042 	uint32_t ext_ctrl;
   14043 
   14044 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14045 		device_xname(sc->sc_dev), __func__));
   14046 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14047 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14048 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14049 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14050 	} else {
   14051 		printf("%s: Semaphore unexpectedly released\n",
   14052 		    device_xname(sc->sc_dev));
   14053 	}
   14054 
   14055 	mutex_exit(sc->sc_ich_phymtx);
   14056 }
   14057 
   14058 static int
   14059 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14060 {
   14061 
   14062 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14063 		device_xname(sc->sc_dev), __func__));
   14064 	mutex_enter(sc->sc_ich_nvmmtx);
   14065 
   14066 	return 0;
   14067 }
   14068 
   14069 static void
   14070 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14071 {
   14072 
   14073 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14074 		device_xname(sc->sc_dev), __func__));
   14075 	mutex_exit(sc->sc_ich_nvmmtx);
   14076 }
   14077 
   14078 static int
   14079 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14080 {
   14081 	int i = 0;
   14082 	uint32_t reg;
   14083 
   14084 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14085 		device_xname(sc->sc_dev), __func__));
   14086 
   14087 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14088 	do {
   14089 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14090 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14091 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14092 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14093 			break;
   14094 		delay(2*1000);
   14095 		i++;
   14096 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14097 
   14098 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14099 		wm_put_hw_semaphore_82573(sc);
   14100 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14101 		    device_xname(sc->sc_dev));
   14102 		return -1;
   14103 	}
   14104 
   14105 	return 0;
   14106 }
   14107 
   14108 static void
   14109 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14110 {
   14111 	uint32_t reg;
   14112 
   14113 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14114 		device_xname(sc->sc_dev), __func__));
   14115 
   14116 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14117 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14118 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14119 }
   14120 
   14121 /*
   14122  * Management mode and power management related subroutines.
   14123  * BMC, AMT, suspend/resume and EEE.
   14124  */
   14125 
   14126 #ifdef WM_WOL
   14127 static int
   14128 wm_check_mng_mode(struct wm_softc *sc)
   14129 {
   14130 	int rv;
   14131 
   14132 	switch (sc->sc_type) {
   14133 	case WM_T_ICH8:
   14134 	case WM_T_ICH9:
   14135 	case WM_T_ICH10:
   14136 	case WM_T_PCH:
   14137 	case WM_T_PCH2:
   14138 	case WM_T_PCH_LPT:
   14139 	case WM_T_PCH_SPT:
   14140 	case WM_T_PCH_CNP:
   14141 		rv = wm_check_mng_mode_ich8lan(sc);
   14142 		break;
   14143 	case WM_T_82574:
   14144 	case WM_T_82583:
   14145 		rv = wm_check_mng_mode_82574(sc);
   14146 		break;
   14147 	case WM_T_82571:
   14148 	case WM_T_82572:
   14149 	case WM_T_82573:
   14150 	case WM_T_80003:
   14151 		rv = wm_check_mng_mode_generic(sc);
   14152 		break;
   14153 	default:
		/* Nothing to do */
   14155 		rv = 0;
   14156 		break;
   14157 	}
   14158 
   14159 	return rv;
   14160 }
   14161 
   14162 static int
   14163 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14164 {
   14165 	uint32_t fwsm;
   14166 
   14167 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14168 
   14169 	if (((fwsm & FWSM_FW_VALID) != 0)
   14170 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14171 		return 1;
   14172 
   14173 	return 0;
   14174 }
   14175 
   14176 static int
   14177 wm_check_mng_mode_82574(struct wm_softc *sc)
   14178 {
   14179 	uint16_t data;
   14180 
   14181 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14182 
   14183 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14184 		return 1;
   14185 
   14186 	return 0;
   14187 }
   14188 
   14189 static int
   14190 wm_check_mng_mode_generic(struct wm_softc *sc)
   14191 {
   14192 	uint32_t fwsm;
   14193 
   14194 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14195 
   14196 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14197 		return 1;
   14198 
   14199 	return 0;
   14200 }
   14201 #endif /* WM_WOL */
   14202 
   14203 static int
   14204 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14205 {
   14206 	uint32_t manc, fwsm, factps;
   14207 
   14208 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14209 		return 0;
   14210 
   14211 	manc = CSR_READ(sc, WMREG_MANC);
   14212 
   14213 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14214 		device_xname(sc->sc_dev), manc));
   14215 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14216 		return 0;
   14217 
   14218 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14219 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14220 		factps = CSR_READ(sc, WMREG_FACTPS);
   14221 		if (((factps & FACTPS_MNGCG) == 0)
   14222 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14223 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14225 		uint16_t data;
   14226 
   14227 		factps = CSR_READ(sc, WMREG_FACTPS);
   14228 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14229 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14230 			device_xname(sc->sc_dev), factps, data));
   14231 		if (((factps & FACTPS_MNGCG) == 0)
   14232 		    && ((data & NVM_CFG2_MNGM_MASK)
   14233 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14234 			return 1;
   14235 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14236 	    && ((manc & MANC_ASF_EN) == 0))
   14237 		return 1;
   14238 
   14239 	return 0;
   14240 }
   14241 
   14242 static bool
   14243 wm_phy_resetisblocked(struct wm_softc *sc)
   14244 {
   14245 	bool blocked = false;
   14246 	uint32_t reg;
   14247 	int i = 0;
   14248 
   14249 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14250 		device_xname(sc->sc_dev), __func__));
   14251 
   14252 	switch (sc->sc_type) {
   14253 	case WM_T_ICH8:
   14254 	case WM_T_ICH9:
   14255 	case WM_T_ICH10:
   14256 	case WM_T_PCH:
   14257 	case WM_T_PCH2:
   14258 	case WM_T_PCH_LPT:
   14259 	case WM_T_PCH_SPT:
   14260 	case WM_T_PCH_CNP:
   14261 		do {
   14262 			reg = CSR_READ(sc, WMREG_FWSM);
   14263 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14264 				blocked = true;
   14265 				delay(10*1000);
   14266 				continue;
   14267 			}
   14268 			blocked = false;
   14269 		} while (blocked && (i++ < 30));
		return blocked;
   14272 	case WM_T_82571:
   14273 	case WM_T_82572:
   14274 	case WM_T_82573:
   14275 	case WM_T_82574:
   14276 	case WM_T_82583:
   14277 	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   14284 	default:
   14285 		/* No problem */
   14286 		break;
   14287 	}
   14288 
   14289 	return false;
   14290 }
   14291 
   14292 static void
   14293 wm_get_hw_control(struct wm_softc *sc)
   14294 {
   14295 	uint32_t reg;
   14296 
   14297 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14298 		device_xname(sc->sc_dev), __func__));
   14299 
   14300 	if (sc->sc_type == WM_T_82573) {
   14301 		reg = CSR_READ(sc, WMREG_SWSM);
   14302 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14303 	} else if (sc->sc_type >= WM_T_82571) {
   14304 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14305 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14306 	}
   14307 }
   14308 
   14309 static void
   14310 wm_release_hw_control(struct wm_softc *sc)
   14311 {
   14312 	uint32_t reg;
   14313 
   14314 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14315 		device_xname(sc->sc_dev), __func__));
   14316 
   14317 	if (sc->sc_type == WM_T_82573) {
   14318 		reg = CSR_READ(sc, WMREG_SWSM);
   14319 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14320 	} else if (sc->sc_type >= WM_T_82571) {
   14321 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14322 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14323 	}
   14324 }
   14325 
   14326 static void
   14327 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14328 {
   14329 	uint32_t reg;
   14330 
   14331 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14332 		device_xname(sc->sc_dev), __func__));
   14333 
   14334 	if (sc->sc_type < WM_T_PCH2)
   14335 		return;
   14336 
   14337 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14338 
   14339 	if (gate)
   14340 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14341 	else
   14342 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14343 
   14344 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14345 }
   14346 
   14347 static int
   14348 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14349 {
   14350 	uint32_t fwsm, reg;
   14351 	int rv = 0;
   14352 
   14353 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14354 		device_xname(sc->sc_dev), __func__));
   14355 
   14356 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14357 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14358 
   14359 	/* Disable ULP */
   14360 	wm_ulp_disable(sc);
   14361 
   14362 	/* Acquire PHY semaphore */
   14363 	rv = sc->phy.acquire(sc);
   14364 	if (rv != 0) {
   14365 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14366 		device_xname(sc->sc_dev), __func__));
   14367 		return -1;
   14368 	}
   14369 
   14370 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14371 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14372 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14373 	 */
   14374 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14375 	switch (sc->sc_type) {
   14376 	case WM_T_PCH_LPT:
   14377 	case WM_T_PCH_SPT:
   14378 	case WM_T_PCH_CNP:
   14379 		if (wm_phy_is_accessible_pchlan(sc))
   14380 			break;
   14381 
   14382 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14383 		 * forcing MAC to SMBus mode first.
   14384 		 */
   14385 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14386 		reg |= CTRL_EXT_FORCE_SMBUS;
   14387 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14388 #if 0
   14389 		/* XXX Isn't this required??? */
   14390 		CSR_WRITE_FLUSH(sc);
   14391 #endif
   14392 		/* Wait 50 milliseconds for MAC to finish any retries
   14393 		 * that it might be trying to perform from previous
   14394 		 * attempts to acknowledge any phy read requests.
   14395 		 */
   14396 		delay(50 * 1000);
   14397 		/* FALLTHROUGH */
   14398 	case WM_T_PCH2:
   14399 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14400 			break;
   14401 		/* FALLTHROUGH */
   14402 	case WM_T_PCH:
   14403 		if (sc->sc_type == WM_T_PCH)
   14404 			if ((fwsm & FWSM_FW_VALID) != 0)
   14405 				break;
   14406 
   14407 		if (wm_phy_resetisblocked(sc) == true) {
   14408 			printf("XXX reset is blocked(3)\n");
   14409 			break;
   14410 		}
   14411 
   14412 		/* Toggle LANPHYPC Value bit */
   14413 		wm_toggle_lanphypc_pch_lpt(sc);
   14414 
   14415 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14416 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14417 				break;
   14418 
   14419 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14420 			 * so ensure that the MAC is also out of SMBus mode
   14421 			 */
   14422 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14423 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14424 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14425 
   14426 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14427 				break;
   14428 			rv = -1;
   14429 		}
   14430 		break;
   14431 	default:
   14432 		break;
   14433 	}
   14434 
   14435 	/* Release semaphore */
   14436 	sc->phy.release(sc);
   14437 
   14438 	if (rv == 0) {
   14439 		/* Check to see if able to reset PHY.  Print error if not */
   14440 		if (wm_phy_resetisblocked(sc)) {
   14441 			printf("XXX reset is blocked(4)\n");
   14442 			goto out;
   14443 		}
   14444 
		/* Reset the PHY before any access to it.  Doing so ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
   14450 		if (wm_reset_phy(sc) != 0)
   14451 			goto out;
   14452 
		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce,
		 * then return E1000E_BLK_PHY_RESET, as this is the condition
		 * that the PHY is in.
		 */
   14459 		if (wm_phy_resetisblocked(sc))
   14460 			printf("XXX reset is blocked(4)\n");
   14461 	}
   14462 
   14463 out:
   14464 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14465 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14466 		delay(10*1000);
   14467 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14468 	}
   14469 
	return rv;
   14471 }
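
/*
 * Rough summary of the recovery ladder above, from least to most
 * invasive: (1) see if the PHY already responds; (2) on PCH_LPT and
 * newer, force the MAC to SMBus mode and retry; (3) toggle the LANPHYPC
 * value bit to force the interconnect back to PCIe and retry (unforcing
 * SMBus in the MAC afterwards); (4) finally reset the PHY outright.
 */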
   14472 
   14473 static void
   14474 wm_init_manageability(struct wm_softc *sc)
   14475 {
   14476 
   14477 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14478 		device_xname(sc->sc_dev), __func__));
   14479 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14480 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14481 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14482 
   14483 		/* Disable hardware interception of ARP */
   14484 		manc &= ~MANC_ARP_EN;
   14485 
   14486 		/* Enable receiving management packets to the host */
   14487 		if (sc->sc_type >= WM_T_82571) {
   14488 			manc |= MANC_EN_MNG2HOST;
   14489 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14490 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14491 		}
   14492 
   14493 		CSR_WRITE(sc, WMREG_MANC, manc);
   14494 	}
   14495 }
   14496 
   14497 static void
   14498 wm_release_manageability(struct wm_softc *sc)
   14499 {
   14500 
   14501 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14502 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14503 
   14504 		manc |= MANC_ARP_EN;
   14505 		if (sc->sc_type >= WM_T_82571)
   14506 			manc &= ~MANC_EN_MNG2HOST;
   14507 
   14508 		CSR_WRITE(sc, WMREG_MANC, manc);
   14509 	}
   14510 }
   14511 
   14512 static void
   14513 wm_get_wakeup(struct wm_softc *sc)
   14514 {
   14515 
   14516 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14517 	switch (sc->sc_type) {
   14518 	case WM_T_82573:
   14519 	case WM_T_82583:
   14520 		sc->sc_flags |= WM_F_HAS_AMT;
   14521 		/* FALLTHROUGH */
   14522 	case WM_T_80003:
   14523 	case WM_T_82575:
   14524 	case WM_T_82576:
   14525 	case WM_T_82580:
   14526 	case WM_T_I350:
   14527 	case WM_T_I354:
   14528 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14529 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14530 		/* FALLTHROUGH */
   14531 	case WM_T_82541:
   14532 	case WM_T_82541_2:
   14533 	case WM_T_82547:
   14534 	case WM_T_82547_2:
   14535 	case WM_T_82571:
   14536 	case WM_T_82572:
   14537 	case WM_T_82574:
   14538 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14539 		break;
   14540 	case WM_T_ICH8:
   14541 	case WM_T_ICH9:
   14542 	case WM_T_ICH10:
   14543 	case WM_T_PCH:
   14544 	case WM_T_PCH2:
   14545 	case WM_T_PCH_LPT:
   14546 	case WM_T_PCH_SPT:
   14547 	case WM_T_PCH_CNP:
   14548 		sc->sc_flags |= WM_F_HAS_AMT;
   14549 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14550 		break;
   14551 	default:
   14552 		break;
   14553 	}
   14554 
   14555 	/* 1: HAS_MANAGE */
   14556 	if (wm_enable_mng_pass_thru(sc) != 0)
   14557 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14558 
	/*
	 * Note that the WOL flags are set after the EEPROM-related
	 * resetting is done.
	 */
   14563 }
   14564 
   14565 /*
   14566  * Unconfigure Ultra Low Power mode.
   14567  * Only for I217 and newer (see below).
   14568  */
   14569 static int
   14570 wm_ulp_disable(struct wm_softc *sc)
   14571 {
   14572 	uint32_t reg;
   14573 	uint16_t phyreg;
   14574 	int i = 0, rv = 0;
   14575 
   14576 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14577 		device_xname(sc->sc_dev), __func__));
   14578 	/* Exclude old devices */
   14579 	if ((sc->sc_type < WM_T_PCH_LPT)
   14580 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14581 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14582 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14583 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14584 		return 0;
   14585 
   14586 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14587 		/* Request ME un-configure ULP mode in the PHY */
   14588 		reg = CSR_READ(sc, WMREG_H2ME);
   14589 		reg &= ~H2ME_ULP;
   14590 		reg |= H2ME_ENFORCE_SETTINGS;
   14591 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14592 
   14593 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14594 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14595 			if (i++ == 30) {
   14596 				printf("%s timed out\n", __func__);
   14597 				return -1;
   14598 			}
   14599 			delay(10 * 1000);
   14600 		}
   14601 		reg = CSR_READ(sc, WMREG_H2ME);
   14602 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14603 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14604 
   14605 		return 0;
   14606 	}
   14607 
   14608 	/* Acquire semaphore */
   14609 	rv = sc->phy.acquire(sc);
   14610 	if (rv != 0) {
   14611 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14612 		device_xname(sc->sc_dev), __func__));
   14613 		return -1;
   14614 	}
   14615 
   14616 	/* Toggle LANPHYPC */
   14617 	wm_toggle_lanphypc_pch_lpt(sc);
   14618 
   14619 	/* Unforce SMBus mode in PHY */
   14620 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14621 	if (rv != 0) {
   14622 		uint32_t reg2;
   14623 
   14624 		printf("%s: Force SMBus first.\n", __func__);
   14625 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14626 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14627 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14628 		delay(50 * 1000);
   14629 
   14630 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14631 		    &phyreg);
   14632 		if (rv != 0)
   14633 			goto release;
   14634 	}
   14635 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14636 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14637 
   14638 	/* Unforce SMBus mode in MAC */
   14639 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14640 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14641 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14642 
   14643 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14644 	if (rv != 0)
   14645 		goto release;
   14646 	phyreg |= HV_PM_CTRL_K1_ENA;
   14647 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14648 
   14649 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14650 		&phyreg);
   14651 	if (rv != 0)
   14652 		goto release;
   14653 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14654 	    | I218_ULP_CONFIG1_STICKY_ULP
   14655 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14656 	    | I218_ULP_CONFIG1_WOL_HOST
   14657 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14658 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14659 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14660 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14661 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14662 	phyreg |= I218_ULP_CONFIG1_START;
   14663 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14664 
   14665 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14666 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14667 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14668 
   14669 release:
   14670 	/* Release semaphore */
   14671 	sc->phy.release(sc);
   14672 	wm_gmii_reset(sc);
   14673 	delay(50 * 1000);
   14674 
   14675 	return rv;
   14676 }
   14677 
   14678 /* WOL in the newer chipset interfaces (pchlan) */
   14679 static int
   14680 wm_enable_phy_wakeup(struct wm_softc *sc)
   14681 {
   14682 	device_t dev = sc->sc_dev;
   14683 	uint32_t mreg, moff;
   14684 	uint16_t wuce, wuc, wufc, preg;
   14685 	int i, rv;
   14686 
   14687 	KASSERT(sc->sc_type >= WM_T_PCH);
   14688 
   14689 	/* Copy MAC RARs to PHY RARs */
   14690 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14691 
   14692 	/* Activate PHY wakeup */
   14693 	rv = sc->phy.acquire(sc);
   14694 	if (rv != 0) {
   14695 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14696 		    __func__);
   14697 		return rv;
   14698 	}
   14699 
   14700 	/*
   14701 	 * Enable access to PHY wakeup registers.
   14702 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14703 	 */
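	/*
	 * wm_enable_phy_wakeup_reg_access_bm() saves the current state in
	 * 'wuce' so that the wm_disable_phy_wakeup_reg_access_bm() call
	 * below can write it back once wakeup register access is done.
	 */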
   14704 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14705 	if (rv != 0) {
   14706 		device_printf(dev,
   14707 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14708 		goto release;
   14709 	}
   14710 
   14711 	/* Copy MAC MTA to PHY MTA */
   14712 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14713 		uint16_t lo, hi;
   14714 
   14715 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14716 		lo = (uint16_t)(mreg & 0xffff);
   14717 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14718 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14719 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14720 	}
   14721 
   14722 	/* Configure PHY Rx Control register */
   14723 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14724 	mreg = CSR_READ(sc, WMREG_RCTL);
   14725 	if (mreg & RCTL_UPE)
   14726 		preg |= BM_RCTL_UPE;
   14727 	if (mreg & RCTL_MPE)
   14728 		preg |= BM_RCTL_MPE;
   14729 	preg &= ~(BM_RCTL_MO_MASK);
   14730 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14731 	if (moff != 0)
   14732 		preg |= moff << BM_RCTL_MO_SHIFT;
   14733 	if (mreg & RCTL_BAM)
   14734 		preg |= BM_RCTL_BAM;
   14735 	if (mreg & RCTL_PMCF)
   14736 		preg |= BM_RCTL_PMCF;
   14737 	mreg = CSR_READ(sc, WMREG_CTRL);
   14738 	if (mreg & CTRL_RFCE)
   14739 		preg |= BM_RCTL_RFCE;
   14740 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14741 
   14742 	wuc = WUC_APME | WUC_PME_EN;
   14743 	wufc = WUFC_MAG;
   14744 	/* Enable PHY wakeup in MAC register */
   14745 	CSR_WRITE(sc, WMREG_WUC,
   14746 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14747 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14748 
   14749 	/* Configure and enable PHY wakeup in PHY registers */
   14750 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14751 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14752 
   14753 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14754 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14755 
   14756 release:
   14757 	sc->phy.release(sc);
   14758 
14759 	return rv;
   14760 }
   14761 
   14762 /* Power down workaround on D3 */
   14763 static void
   14764 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14765 {
   14766 	uint32_t reg;
   14767 	uint16_t phyreg;
   14768 	int i;
   14769 
   14770 	for (i = 0; i < 2; i++) {
   14771 		/* Disable link */
   14772 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14773 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14774 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14775 
   14776 		/*
   14777 		 * Call gig speed drop workaround on Gig disable before
   14778 		 * accessing any PHY registers
   14779 		 */
   14780 		if (sc->sc_type == WM_T_ICH8)
   14781 			wm_gig_downshift_workaround_ich8lan(sc);
   14782 
   14783 		/* Write VR power-down enable */
   14784 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14785 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14786 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14787 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14788 
   14789 		/* Read it back and test */
   14790 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14791 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14792 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14793 			break;
   14794 
   14795 		/* Issue PHY reset and repeat at most one more time */
   14796 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14797 	}
   14798 }
   14799 
   14800 /*
   14801  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14802  *  @sc: pointer to the HW structure
   14803  *
   14804  *  During S0 to Sx transition, it is possible the link remains at gig
   14805  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14806  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14807  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14808  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14809  *  needs to be written.
14810  *  Parts that support (and are linked to a partner that supports) EEE in
14811  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
14812  *  than 10Mbps w/o EEE.
   14813  */
   14814 static void
   14815 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14816 {
   14817 	device_t dev = sc->sc_dev;
   14818 	struct ethercom *ec = &sc->sc_ethercom;
   14819 	uint32_t phy_ctrl;
   14820 	int rv;
   14821 
   14822 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14823 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14824 
   14825 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14826 
   14827 	if (sc->sc_phytype == WMPHY_I217) {
   14828 		uint16_t devid = sc->sc_pcidevid;
   14829 
   14830 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14831 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14832 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14833 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14834 		    (sc->sc_type >= WM_T_PCH_SPT))
   14835 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14836 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14837 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14838 
   14839 		if (sc->phy.acquire(sc) != 0)
   14840 			goto out;
   14841 
   14842 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14843 			uint16_t eee_advert;
   14844 
   14845 			rv = wm_read_emi_reg_locked(dev,
   14846 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14847 			if (rv)
   14848 				goto release;
   14849 
   14850 			/*
   14851 			 * Disable LPLU if both link partners support 100BaseT
   14852 			 * EEE and 100Full is advertised on both ends of the
   14853 			 * link, and enable Auto Enable LPI since there will
   14854 			 * be no driver to enable LPI while in Sx.
   14855 			 */
   14856 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14857 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14858 				uint16_t anar, phy_reg;
   14859 
   14860 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14861 				    &anar);
   14862 				if (anar & ANAR_TX_FD) {
   14863 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14864 					    PHY_CTRL_NOND0A_LPLU);
   14865 
   14866 					/* Set Auto Enable LPI after link up */
   14867 					sc->phy.readreg_locked(dev, 2,
   14868 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14869 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14870 					sc->phy.writereg_locked(dev, 2,
   14871 					    I217_LPI_GPIO_CTRL, phy_reg);
   14872 				}
   14873 			}
   14874 		}
   14875 
   14876 		/*
   14877 		 * For i217 Intel Rapid Start Technology support,
   14878 		 * when the system is going into Sx and no manageability engine
   14879 		 * is present, the driver must configure proxy to reset only on
   14880 		 * power good.	LPI (Low Power Idle) state must also reset only
   14881 		 * on power good, as well as the MTA (Multicast table array).
   14882 		 * The SMBus release must also be disabled on LCD reset.
   14883 		 */
   14884 
   14885 		/*
   14886 		 * Enable MTA to reset for Intel Rapid Start Technology
   14887 		 * Support
   14888 		 */
   14889 
   14890 release:
   14891 		sc->phy.release(sc);
   14892 	}
   14893 out:
   14894 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14895 
   14896 	if (sc->sc_type == WM_T_ICH8)
   14897 		wm_gig_downshift_workaround_ich8lan(sc);
   14898 
   14899 	if (sc->sc_type >= WM_T_PCH) {
   14900 		wm_oem_bits_config_ich8lan(sc, false);
   14901 
   14902 		/* Reset PHY to activate OEM bits on 82577/8 */
   14903 		if (sc->sc_type == WM_T_PCH)
   14904 			wm_reset_phy(sc);
   14905 
   14906 		if (sc->phy.acquire(sc) != 0)
   14907 			return;
   14908 		wm_write_smbus_addr(sc);
   14909 		sc->phy.release(sc);
   14910 	}
   14911 }
   14912 
   14913 /*
   14914  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14915  *  @sc: pointer to the HW structure
   14916  *
   14917  *  During Sx to S0 transitions on non-managed devices or managed devices
   14918  *  on which PHY resets are not blocked, if the PHY registers cannot be
14919  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   14920  *  the PHY.
   14921  *  On i217, setup Intel Rapid Start Technology.
   14922  */
   14923 static int
   14924 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14925 {
   14926 	device_t dev = sc->sc_dev;
   14927 	int rv;
   14928 
   14929 	if (sc->sc_type < WM_T_PCH2)
   14930 		return 0;
   14931 
   14932 	rv = wm_init_phy_workarounds_pchlan(sc);
   14933 	if (rv != 0)
   14934 		return -1;
   14935 
14936 	/* For i217 Intel Rapid Start Technology support: when the system
14937 	 * is transitioning from Sx and no manageability engine is present,
14938 	 * configure SMBus to restore on reset, disable proxy, and enable
14939 	 * the reset on MTA (Multicast table array).
14940 	 */
   14941 	if (sc->sc_phytype == WMPHY_I217) {
   14942 		uint16_t phy_reg;
   14943 
   14944 		if (sc->phy.acquire(sc) != 0)
   14945 			return -1;
   14946 
   14947 		/* Clear Auto Enable LPI after link up */
   14948 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14949 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14950 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14951 
   14952 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14953 			/* Restore clear on SMB if no manageability engine
   14954 			 * is present
   14955 			 */
   14956 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   14957 			    &phy_reg);
   14958 			if (rv != 0)
   14959 				goto release;
   14960 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14961 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14962 
   14963 			/* Disable Proxy */
   14964 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14965 		}
   14966 		/* Enable reset on MTA */
14967 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14968 		if (rv != 0)
   14969 			goto release;
   14970 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14971 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14972 
   14973 release:
   14974 		sc->phy.release(sc);
   14975 		return rv;
   14976 	}
   14977 
   14978 	return 0;
   14979 }
   14980 
   14981 static void
   14982 wm_enable_wakeup(struct wm_softc *sc)
   14983 {
   14984 	uint32_t reg, pmreg;
   14985 	pcireg_t pmode;
   14986 	int rv = 0;
   14987 
   14988 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14989 		device_xname(sc->sc_dev), __func__));
   14990 
   14991 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14992 	    &pmreg, NULL) == 0)
   14993 		return;
   14994 
   14995 	if ((sc->sc_flags & WM_F_WOL) == 0)
   14996 		goto pme;
   14997 
   14998 	/* Advertise the wakeup capability */
   14999 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15000 	    | CTRL_SWDPIN(3));
   15001 
   15002 	/* Keep the laser running on fiber adapters */
   15003 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15004 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15005 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15006 		reg |= CTRL_EXT_SWDPIN(3);
   15007 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15008 	}
   15009 
   15010 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15011 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15012 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15013 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15014 		wm_suspend_workarounds_ich8lan(sc);
   15015 
   15016 #if 0	/* For the multicast packet */
   15017 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15018 	reg |= WUFC_MC;
   15019 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15020 #endif
   15021 
   15022 	if (sc->sc_type >= WM_T_PCH) {
   15023 		rv = wm_enable_phy_wakeup(sc);
   15024 		if (rv != 0)
   15025 			goto pme;
   15026 	} else {
   15027 		/* Enable wakeup by the MAC */
   15028 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15029 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15030 	}
   15031 
   15032 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15033 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15034 		|| (sc->sc_type == WM_T_PCH2))
   15035 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15036 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15037 
   15038 pme:
   15039 	/* Request PME */
   15040 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15041 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15042 		/* For WOL */
   15043 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15044 	} else {
   15045 		/* Disable WOL */
   15046 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15047 	}
   15048 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15049 }
   15050 
15051 /* Disable ASPM L0s and/or L1 to work around errata */
   15052 static void
   15053 wm_disable_aspm(struct wm_softc *sc)
   15054 {
   15055 	pcireg_t reg, mask = 0;
15056 	const char *str = "";
   15057 
15058 	/*
15059 	 *  Only for PCIe devices that have the PCIe capability in their PCI
15060 	 *  config space.
15061 	 */
   15062 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15063 		return;
   15064 
   15065 	switch (sc->sc_type) {
   15066 	case WM_T_82571:
   15067 	case WM_T_82572:
   15068 		/*
   15069 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15070 		 * State Power management L1 State (ASPM L1).
   15071 		 */
   15072 		mask = PCIE_LCSR_ASPM_L1;
   15073 		str = "L1 is";
   15074 		break;
   15075 	case WM_T_82573:
   15076 	case WM_T_82574:
   15077 	case WM_T_82583:
15078 		/*
15079 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
15080 		 *
15081 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
15082 		 * some chipsets.  The documentation for the 82574 and 82583
15083 		 * says that disabling L0s with those specific chipsets is
15084 		 * sufficient, but we follow what the Intel em driver does.
15085 		 *
15086 		 * References:
15087 		 * Errata 8 of the Specification Update of i82573.
15088 		 * Errata 20 of the Specification Update of i82574.
15089 		 * Errata 9 of the Specification Update of i82583.
15090 		 */
   15091 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15092 		str = "L0s and L1 are";
   15093 		break;
   15094 	default:
   15095 		return;
   15096 	}
   15097 
   15098 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15099 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15100 	reg &= ~mask;
   15101 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15102 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15103 
   15104 	/* Print only in wm_attach() */
   15105 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15106 		aprint_verbose_dev(sc->sc_dev,
   15107 		    "ASPM %s disabled to workaround the errata.\n", str);
   15108 }
   15109 
15110 /* LPLU (Low Power Link Up) */
   15111 
   15112 static void
   15113 wm_lplu_d0_disable(struct wm_softc *sc)
   15114 {
   15115 	struct mii_data *mii = &sc->sc_mii;
   15116 	uint32_t reg;
   15117 	uint16_t phyval;
   15118 
   15119 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15120 		device_xname(sc->sc_dev), __func__));
   15121 
   15122 	if (sc->sc_phytype == WMPHY_IFE)
   15123 		return;
   15124 
   15125 	switch (sc->sc_type) {
   15126 	case WM_T_82571:
   15127 	case WM_T_82572:
   15128 	case WM_T_82573:
   15129 	case WM_T_82575:
   15130 	case WM_T_82576:
   15131 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15132 		phyval &= ~PMR_D0_LPLU;
   15133 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15134 		break;
   15135 	case WM_T_82580:
   15136 	case WM_T_I350:
   15137 	case WM_T_I210:
   15138 	case WM_T_I211:
   15139 		reg = CSR_READ(sc, WMREG_PHPM);
   15140 		reg &= ~PHPM_D0A_LPLU;
   15141 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15142 		break;
   15143 	case WM_T_82574:
   15144 	case WM_T_82583:
   15145 	case WM_T_ICH8:
   15146 	case WM_T_ICH9:
   15147 	case WM_T_ICH10:
   15148 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15149 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15150 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15151 		CSR_WRITE_FLUSH(sc);
   15152 		break;
   15153 	case WM_T_PCH:
   15154 	case WM_T_PCH2:
   15155 	case WM_T_PCH_LPT:
   15156 	case WM_T_PCH_SPT:
   15157 	case WM_T_PCH_CNP:
   15158 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15159 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15160 		if (wm_phy_resetisblocked(sc) == false)
   15161 			phyval |= HV_OEM_BITS_ANEGNOW;
   15162 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15163 		break;
   15164 	default:
   15165 		break;
   15166 	}
   15167 }
   15168 
15169 /* EEE (Energy Efficient Ethernet) */
   15170 
   15171 static int
   15172 wm_set_eee_i350(struct wm_softc *sc)
   15173 {
   15174 	struct ethercom *ec = &sc->sc_ethercom;
   15175 	uint32_t ipcnfg, eeer;
   15176 	uint32_t ipcnfg_mask
   15177 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15178 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15179 
   15180 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15181 
   15182 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15183 	eeer = CSR_READ(sc, WMREG_EEER);
   15184 
   15185 	/* Enable or disable per user setting */
   15186 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15187 		ipcnfg |= ipcnfg_mask;
   15188 		eeer |= eeer_mask;
   15189 	} else {
   15190 		ipcnfg &= ~ipcnfg_mask;
   15191 		eeer &= ~eeer_mask;
   15192 	}
   15193 
   15194 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15195 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15196 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15197 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15198 
   15199 	return 0;
   15200 }
   15201 
   15202 static int
   15203 wm_set_eee_pchlan(struct wm_softc *sc)
   15204 {
   15205 	device_t dev = sc->sc_dev;
   15206 	struct ethercom *ec = &sc->sc_ethercom;
   15207 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15208 	int rv = 0;
   15209 
   15210 	switch (sc->sc_phytype) {
   15211 	case WMPHY_82579:
   15212 		lpa = I82579_EEE_LP_ABILITY;
   15213 		pcs_status = I82579_EEE_PCS_STATUS;
   15214 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15215 		break;
   15216 	case WMPHY_I217:
   15217 		lpa = I217_EEE_LP_ABILITY;
   15218 		pcs_status = I217_EEE_PCS_STATUS;
   15219 		adv_addr = I217_EEE_ADVERTISEMENT;
   15220 		break;
   15221 	default:
   15222 		return 0;
   15223 	}
   15224 
   15225 	if (sc->phy.acquire(sc)) {
   15226 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15227 		return 0;
   15228 	}
   15229 
   15230 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15231 	if (rv != 0)
   15232 		goto release;
   15233 
   15234 	/* Clear bits that enable EEE in various speeds */
   15235 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15236 
   15237 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15238 		/* Save off link partner's EEE ability */
   15239 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15240 		if (rv != 0)
   15241 			goto release;
   15242 
   15243 		/* Read EEE advertisement */
   15244 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15245 			goto release;
   15246 
   15247 		/*
   15248 		 * Enable EEE only for speeds in which the link partner is
   15249 		 * EEE capable and for which we advertise EEE.
   15250 		 */
   15251 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15252 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15253 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15254 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15255 			if ((data & ANLPAR_TX_FD) != 0)
   15256 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15257 			else {
   15258 				/*
   15259 				 * EEE is not supported in 100Half, so ignore
   15260 				 * partner's EEE in 100 ability if full-duplex
   15261 				 * is not advertised.
   15262 				 */
   15263 				sc->eee_lp_ability
   15264 				    &= ~AN_EEEADVERT_100_TX;
   15265 			}
   15266 		}
   15267 	}
   15268 
   15269 	if (sc->sc_phytype == WMPHY_82579) {
   15270 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15271 		if (rv != 0)
   15272 			goto release;
   15273 
   15274 		data &= ~I82579_LPI_PLL_SHUT_100;
   15275 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15276 	}
   15277 
   15278 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15279 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15280 		goto release;
   15281 
   15282 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15283 release:
   15284 	sc->phy.release(sc);
   15285 
   15286 	return rv;
   15287 }
   15288 
   15289 static int
   15290 wm_set_eee(struct wm_softc *sc)
   15291 {
   15292 	struct ethercom *ec = &sc->sc_ethercom;
   15293 
   15294 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15295 		return 0;
   15296 
   15297 	if (sc->sc_type == WM_T_I354) {
   15298 		/* I354 uses an external PHY */
   15299 		return 0; /* not yet */
   15300 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15301 		return wm_set_eee_i350(sc);
   15302 	else if (sc->sc_type >= WM_T_PCH2)
   15303 		return wm_set_eee_pchlan(sc);
   15304 
   15305 	return 0;
   15306 }
   15307 
   15308 /*
   15309  * Workarounds (mainly PHY related).
15310  * Basically, PHY-specific workarounds live in the PHY drivers.
   15311  */
   15312 
   15313 /* Work-around for 82566 Kumeran PCS lock loss */
   15314 static int
   15315 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15316 {
   15317 	struct mii_data *mii = &sc->sc_mii;
   15318 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15319 	int i, reg, rv;
   15320 	uint16_t phyreg;
   15321 
   15322 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15323 		device_xname(sc->sc_dev), __func__));
   15324 
   15325 	/* If the link is not up, do nothing */
   15326 	if ((status & STATUS_LU) == 0)
   15327 		return 0;
   15328 
   15329 	/* Nothing to do if the link is other than 1Gbps */
   15330 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15331 		return 0;
   15332 
   15333 	for (i = 0; i < 10; i++) {
   15334 		/* read twice */
   15335 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15336 		if (rv != 0)
   15337 			return rv;
   15338 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15339 		if (rv != 0)
   15340 			return rv;
   15341 
   15342 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15343 			goto out;	/* GOOD! */
   15344 
   15345 		/* Reset the PHY */
   15346 		wm_reset_phy(sc);
   15347 		delay(5*1000);
   15348 	}
   15349 
   15350 	/* Disable GigE link negotiation */
   15351 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15352 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15353 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15354 
   15355 	/*
   15356 	 * Call gig speed drop workaround on Gig disable before accessing
   15357 	 * any PHY registers.
   15358 	 */
   15359 	wm_gig_downshift_workaround_ich8lan(sc);
   15360 
   15361 out:
   15362 	return 0;
   15363 }
   15364 
   15365 /*
   15366  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15367  *  @sc: pointer to the HW structure
   15368  *
15369  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15370  *  LPLU, Gig disable, MDIC PHY reset):
   15371  *    1) Set Kumeran Near-end loopback
   15372  *    2) Clear Kumeran Near-end loopback
   15373  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15374  */
   15375 static void
   15376 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15377 {
   15378 	uint16_t kmreg;
   15379 
   15380 	/* Only for igp3 */
   15381 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15382 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15383 			return;
   15384 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15385 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15386 			return;
   15387 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15388 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15389 	}
   15390 }
   15391 
15392 /*
15393  * Workarounds for the PCH (82577/82578) PHYs.
15394  * XXX should these be moved to a new PHY driver?
15395  */
   15396 static int
   15397 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15398 {
   15399 	device_t dev = sc->sc_dev;
   15400 	struct mii_data *mii = &sc->sc_mii;
   15401 	struct mii_softc *child;
   15402 	uint16_t phy_data, phyrev = 0;
   15403 	int phytype = sc->sc_phytype;
   15404 	int rv;
   15405 
   15406 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15407 		device_xname(dev), __func__));
   15408 	KASSERT(sc->sc_type == WM_T_PCH);
   15409 
   15410 	/* Set MDIO slow mode before any other MDIO access */
   15411 	if (phytype == WMPHY_82577)
   15412 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15413 			return rv;
   15414 
   15415 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15416 	if (child != NULL)
   15417 		phyrev = child->mii_mpd_rev;
   15418 
15419 	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   15420 	if ((child != NULL) &&
   15421 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15422 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15423 		/* Disable generation of early preamble (0x4431) */
   15424 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15425 		    &phy_data);
   15426 		if (rv != 0)
   15427 			return rv;
   15428 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15429 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15430 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15431 		    phy_data);
   15432 		if (rv != 0)
   15433 			return rv;
   15434 
   15435 		/* Preamble tuning for SSC */
   15436 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15437 		if (rv != 0)
   15438 			return rv;
   15439 	}
   15440 
   15441 	/* 82578 */
   15442 	if (phytype == WMPHY_82578) {
   15443 		/*
   15444 		 * Return registers to default by doing a soft reset then
   15445 		 * writing 0x3140 to the control register
   15446 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15447 		 */
   15448 		if ((child != NULL) && (phyrev < 2)) {
   15449 			PHY_RESET(child);
   15450 			rv = sc->sc_mii.mii_writereg(dev, 2, MII_BMCR,
   15451 			    0x3140);
   15452 			if (rv != 0)
   15453 				return rv;
   15454 		}
   15455 	}
   15456 
   15457 	/* Select page 0 */
   15458 	if ((rv = sc->phy.acquire(sc)) != 0)
   15459 		return rv;
   15460 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15461 	sc->phy.release(sc);
   15462 	if (rv != 0)
   15463 		return rv;
   15464 
   15465 	/*
15466 	 * Configure the K1 Si workaround during PHY reset assuming there is
15467 	 * link, so that K1 is disabled if the link is at 1Gbps.
   15468 	 */
   15469 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15470 		return rv;
   15471 
   15472 	/* Workaround for link disconnects on a busy hub in half duplex */
   15473 	rv = sc->phy.acquire(sc);
   15474 	if (rv)
   15475 		return rv;
   15476 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15477 	if (rv)
   15478 		goto release;
   15479 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15480 	    phy_data & 0x00ff);
   15481 	if (rv)
   15482 		goto release;
   15483 
   15484 	/* Set MSE higher to enable link to stay up when noise is high */
   15485 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15486 release:
   15487 	sc->phy.release(sc);
   15488 
   15489 	return rv;
   15490 }
   15491 
   15492 /*
   15493  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15494  *  @sc:   pointer to the HW structure
   15495  */
   15496 static void
   15497 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15498 {
   15499 	device_t dev = sc->sc_dev;
   15500 	uint32_t mac_reg;
   15501 	uint16_t i, wuce;
   15502 	int count;
   15503 
   15504 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15505 		device_xname(sc->sc_dev), __func__));
   15506 
   15507 	if (sc->phy.acquire(sc) != 0)
   15508 		return;
   15509 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15510 		goto release;
   15511 
   15512 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15513 	count = wm_rar_count(sc);
   15514 	for (i = 0; i < count; i++) {
   15515 		uint16_t lo, hi;
   15516 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15517 		lo = (uint16_t)(mac_reg & 0xffff);
   15518 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15519 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15520 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15521 
   15522 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15523 		lo = (uint16_t)(mac_reg & 0xffff);
   15524 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15525 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15526 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15527 	}
   15528 
   15529 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15530 
   15531 release:
   15532 	sc->phy.release(sc);
   15533 }
   15534 
   15535 /*
   15536  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15537  *  done after every PHY reset.
   15538  */
   15539 static int
   15540 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15541 {
   15542 	device_t dev = sc->sc_dev;
   15543 	int rv;
   15544 
   15545 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15546 		device_xname(dev), __func__));
   15547 	KASSERT(sc->sc_type == WM_T_PCH2);
   15548 
   15549 	/* Set MDIO slow mode before any other MDIO access */
   15550 	rv = wm_set_mdio_slow_mode_hv(sc);
   15551 	if (rv != 0)
   15552 		return rv;
   15553 
   15554 	rv = sc->phy.acquire(sc);
   15555 	if (rv != 0)
   15556 		return rv;
   15557 	/* Set MSE higher to enable link to stay up when noise is high */
   15558 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15559 	if (rv != 0)
   15560 		goto release;
15561 	/* Drop the link after the MSE threshold has been reached 5 times */
   15562 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15563 release:
   15564 	sc->phy.release(sc);
   15565 
   15566 	return rv;
   15567 }
   15568 
   15569 /**
   15570  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15571  *  @link: link up bool flag
   15572  *
15573  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
15574  *  indications, preventing further DMA write requests.  Work around the
15575  *  issue by disabling the de-assertion of the clock request in 1Gbps mode.
   15576  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15577  *  speeds in order to avoid Tx hangs.
   15578  **/
   15579 static int
   15580 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15581 {
   15582 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15583 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15584 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15585 	uint16_t phyreg;
   15586 
   15587 	if (link && (speed == STATUS_SPEED_1000)) {
15588 		if (sc->phy.acquire(sc) != 0)
15589 			return -1;
15590 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &phyreg);
   15591 		if (rv != 0)
   15592 			goto release;
   15593 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15594 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15595 		if (rv != 0)
   15596 			goto release;
   15597 		delay(20);
   15598 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15599 
   15600 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15601 		    &phyreg);
   15602 release:
   15603 		sc->phy.release(sc);
   15604 		return rv;
   15605 	}
   15606 
   15607 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15608 
   15609 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15610 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15611 	    || !link
   15612 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15613 		goto update_fextnvm6;
   15614 
   15615 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15616 
   15617 	/* Clear link status transmit timeout */
   15618 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15619 	if (speed == STATUS_SPEED_100) {
   15620 		/* Set inband Tx timeout to 5x10us for 100Half */
   15621 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15622 
   15623 		/* Do not extend the K1 entry latency for 100Half */
   15624 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15625 	} else {
   15626 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15627 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15628 
   15629 		/* Extend the K1 entry latency for 10 Mbps */
   15630 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15631 	}
   15632 
   15633 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15634 
   15635 update_fextnvm6:
   15636 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15637 	return 0;
   15638 }
   15639 
   15640 /*
   15641  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15642  *  @sc:   pointer to the HW structure
   15643  *  @link: link up bool flag
   15644  *
   15645  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
15646  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15647  *  If link is down, the function will restore the default K1 setting located
   15648  *  in the NVM.
   15649  */
   15650 static int
   15651 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15652 {
   15653 	int k1_enable = sc->sc_nvm_k1_enabled;
   15654 
   15655 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15656 		device_xname(sc->sc_dev), __func__));
   15657 
   15658 	if (sc->phy.acquire(sc) != 0)
   15659 		return -1;
   15660 
   15661 	if (link) {
   15662 		k1_enable = 0;
   15663 
   15664 		/* Link stall fix for link up */
   15665 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15666 		    0x0100);
   15667 	} else {
   15668 		/* Link stall fix for link down */
   15669 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15670 		    0x4100);
   15671 	}
   15672 
   15673 	wm_configure_k1_ich8lan(sc, k1_enable);
   15674 	sc->phy.release(sc);
   15675 
   15676 	return 0;
   15677 }
   15678 
   15679 /*
   15680  *  wm_k1_workaround_lv - K1 Si workaround
   15681  *  @sc:   pointer to the HW structure
   15682  *
15683  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
15684  *  Disable K1 for 1000 and 100 speeds.
   15685  */
   15686 static int
   15687 wm_k1_workaround_lv(struct wm_softc *sc)
   15688 {
   15689 	uint32_t reg;
   15690 	uint16_t phyreg;
   15691 	int rv;
   15692 
   15693 	if (sc->sc_type != WM_T_PCH2)
   15694 		return 0;
   15695 
   15696 	/* Set K1 beacon duration based on 10Mbps speed */
   15697 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15698 	if (rv != 0)
   15699 		return rv;
   15700 
   15701 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15702 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15703 		if (phyreg &
   15704 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
15705 			/* LV 1G/100 packet drop issue workaround */
   15706 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15707 			    &phyreg);
   15708 			if (rv != 0)
   15709 				return rv;
   15710 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15711 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15712 			    phyreg);
   15713 			if (rv != 0)
   15714 				return rv;
   15715 		} else {
   15716 			/* For 10Mbps */
   15717 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15718 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15719 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15720 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15721 		}
   15722 	}
   15723 
   15724 	return 0;
   15725 }
   15726 
   15727 /*
   15728  *  wm_link_stall_workaround_hv - Si workaround
   15729  *  @sc: pointer to the HW structure
   15730  *
   15731  *  This function works around a Si bug where the link partner can get
   15732  *  a link up indication before the PHY does. If small packets are sent
   15733  *  by the link partner they can be placed in the packet buffer without
15734  *  being properly accounted for by the PHY and will stall, preventing
   15735  *  further packets from being received.  The workaround is to clear the
   15736  *  packet buffer after the PHY detects link up.
   15737  */
   15738 static int
   15739 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15740 {
   15741 	uint16_t phyreg;
   15742 
   15743 	if (sc->sc_phytype != WMPHY_82578)
   15744 		return 0;
   15745 
15746 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15747 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15748 	if ((phyreg & BMCR_LOOP) != 0)
   15749 		return 0;
   15750 
   15751 	/* Check if link is up and at 1Gbps */
   15752 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15753 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15754 	    | BM_CS_STATUS_SPEED_MASK;
   15755 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15756 		| BM_CS_STATUS_SPEED_1000))
   15757 		return 0;
   15758 
   15759 	delay(200 * 1000);	/* XXX too big */
   15760 
   15761 	/* Flush the packets in the fifo buffer */
   15762 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15763 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15764 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15765 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15766 
   15767 	return 0;
   15768 }
   15769 
   15770 static int
   15771 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15772 {
   15773 	int rv;
   15774 	uint16_t reg;
   15775 
   15776 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15777 	if (rv != 0)
   15778 		return rv;
   15779 
15780 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15781 	    reg | HV_KMRN_MDIO_SLOW);
   15782 }
   15783 
   15784 /*
   15785  *  wm_configure_k1_ich8lan - Configure K1 power state
   15786  *  @sc: pointer to the HW structure
   15787  *  @enable: K1 state to configure
   15788  *
   15789  *  Configure the K1 power state based on the provided parameter.
   15790  *  Assumes semaphore already acquired.
   15791  */
   15792 static void
   15793 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15794 {
   15795 	uint32_t ctrl, ctrl_ext, tmp;
   15796 	uint16_t kmreg;
   15797 	int rv;
   15798 
   15799 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15800 
   15801 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15802 	if (rv != 0)
   15803 		return;
   15804 
   15805 	if (k1_enable)
   15806 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15807 	else
   15808 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15809 
   15810 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15811 	if (rv != 0)
   15812 		return;
   15813 
   15814 	delay(20);
   15815 
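	/*
	 * Temporarily force the MAC speed (CTRL_FRCSPD with the speed-bypass
	 * bit CTRL_EXT_SPD_BYPS), presumably so that the K1 change written
	 * above takes effect, then restore the original CTRL and CTRL_EXT
	 * values below.
	 */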
   15816 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15817 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15818 
   15819 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15820 	tmp |= CTRL_FRCSPD;
   15821 
   15822 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15823 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15824 	CSR_WRITE_FLUSH(sc);
   15825 	delay(20);
   15826 
   15827 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15828 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15829 	CSR_WRITE_FLUSH(sc);
   15830 	delay(20);
   15831 
   15832 	return;
   15833 }
   15834 
15835 /* Special case - the 82575 needs manual init ... */
   15836 static void
   15837 wm_reset_init_script_82575(struct wm_softc *sc)
   15838 {
   15839 	/*
15840 	 * Remark: this is untested code - we have no board without EEPROM.
15841 	 * The setup is the same as in the FreeBSD driver for the i82575.
   15842 	 */
   15843 
   15844 	/* SerDes configuration via SERDESCTRL */
   15845 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15846 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15847 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15848 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15849 
   15850 	/* CCM configuration via CCMCTL register */
   15851 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15852 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15853 
   15854 	/* PCIe lanes configuration */
   15855 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15856 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15857 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15858 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15859 
   15860 	/* PCIe PLL Configuration */
   15861 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15862 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15863 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15864 }
   15865 
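/*
 * Restore the MDICNFG destination and shared-MDIO bits from the CFG3_PORTA
 * NVM word after a reset.  Only needed for 82580 parts in SGMII mode.
 */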
   15866 static void
   15867 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15868 {
   15869 	uint32_t reg;
   15870 	uint16_t nvmword;
   15871 	int rv;
   15872 
   15873 	if (sc->sc_type != WM_T_82580)
   15874 		return;
   15875 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15876 		return;
   15877 
   15878 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15879 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15880 	if (rv != 0) {
   15881 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15882 		    __func__);
   15883 		return;
   15884 	}
   15885 
   15886 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15887 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15888 		reg |= MDICNFG_DEST;
   15889 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15890 		reg |= MDICNFG_COM_MDIO;
   15891 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15892 }
   15893 
   15894 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15895 
   15896 static bool
   15897 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15898 {
   15899 	uint32_t reg;
   15900 	uint16_t id1, id2;
   15901 	int i, rv;
   15902 
   15903 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15904 		device_xname(sc->sc_dev), __func__));
   15905 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15906 
   15907 	id1 = id2 = 0xffff;
   15908 	for (i = 0; i < 2; i++) {
   15909 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15910 		    &id1);
   15911 		if ((rv != 0) || MII_INVALIDID(id1))
   15912 			continue;
   15913 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15914 		    &id2);
   15915 		if ((rv != 0) || MII_INVALIDID(id2))
   15916 			continue;
   15917 		break;
   15918 	}
   15919 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15920 		goto out;
   15921 
   15922 	/*
   15923 	 * In case the PHY needs to be in mdio slow mode,
   15924 	 * set slow mode and try to get the PHY id again.
   15925 	 */
   15926 	rv = 0;
   15927 	if (sc->sc_type < WM_T_PCH_LPT) {
   15928 		sc->phy.release(sc);
   15929 		wm_set_mdio_slow_mode_hv(sc);
   15930 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15931 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15932 		sc->phy.acquire(sc);
   15933 	}
   15934 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
15935 		printf("%s: PHY is inaccessible\n", __func__);
   15936 		return false;
   15937 	}
   15938 out:
   15939 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15940 		/* Only unforce SMBus if ME is not active */
   15941 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15942 			uint16_t phyreg;
   15943 
   15944 			/* Unforce SMBus mode in PHY */
   15945 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15946 			    CV_SMB_CTRL, &phyreg);
   15947 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15948 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15949 			    CV_SMB_CTRL, phyreg);
   15950 
   15951 			/* Unforce SMBus mode in MAC */
   15952 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15953 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15954 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15955 		}
   15956 	}
   15957 	return true;
   15958 }
   15959 
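/*
 * Toggle the LANPHYPC pin value to power cycle the PHY.  On PCH_LPT and
 * newer parts, completion is detected by polling CTRL_EXT_LPCD instead of
 * relying on a fixed delay alone.
 */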
   15960 static void
   15961 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15962 {
   15963 	uint32_t reg;
   15964 	int i;
   15965 
   15966 	/* Set PHY Config Counter to 50msec */
   15967 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15968 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15969 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15970 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15971 
   15972 	/* Toggle LANPHYPC */
   15973 	reg = CSR_READ(sc, WMREG_CTRL);
   15974 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15975 	reg &= ~CTRL_LANPHYPC_VALUE;
   15976 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15977 	CSR_WRITE_FLUSH(sc);
   15978 	delay(1000);
   15979 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15980 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15981 	CSR_WRITE_FLUSH(sc);
   15982 
   15983 	if (sc->sc_type < WM_T_PCH_LPT)
   15984 		delay(50 * 1000);
   15985 	else {
   15986 		i = 20;
   15987 
   15988 		do {
   15989 			delay(5 * 1000);
   15990 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15991 		    && i--);
   15992 
   15993 		delay(30 * 1000);
   15994 	}
   15995 }
   15996 
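/*
 * Set platform power management values: program the LTR (Latency Tolerance
 * Reporting) registers from the link speed and the Rx packet buffer size,
 * and configure the OBFF high water mark.
 */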
   15997 static int
   15998 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   15999 {
   16000 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16001 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16002 	uint32_t rxa;
   16003 	uint16_t scale = 0, lat_enc = 0;
   16004 	int32_t obff_hwm = 0;
   16005 	int64_t lat_ns, value;
   16006 
   16007 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16008 		device_xname(sc->sc_dev), __func__));
   16009 
   16010 	if (link) {
   16011 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16012 		uint32_t status;
   16013 		uint16_t speed;
   16014 		pcireg_t preg;
   16015 
   16016 		status = CSR_READ(sc, WMREG_STATUS);
   16017 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16018 		case STATUS_SPEED_10:
   16019 			speed = 10;
   16020 			break;
   16021 		case STATUS_SPEED_100:
   16022 			speed = 100;
   16023 			break;
   16024 		case STATUS_SPEED_1000:
   16025 			speed = 1000;
   16026 			break;
   16027 		default:
   16028 			device_printf(sc->sc_dev, "Unknown speed "
   16029 			    "(status = %08x)\n", status);
   16030 			return -1;
   16031 		}
   16032 
   16033 		/* Rx Packet Buffer Allocation size (KB) */
   16034 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16035 
   16036 		/*
   16037 		 * Determine the maximum latency tolerated by the device.
   16038 		 *
   16039 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16040 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16041 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16042 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16043 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
16044 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns. */
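		/*
		 * Worked example with hypothetical numbers: rxa = 24 (KB),
		 * MTU = 1500 and speed = 1000 give
		 * lat_ns = (24 * 1024 - 2 * (1500 + 14)) * 8 * 1000 / 1000
		 *        = 172384.
		 * 172384 does not fit in the 10-bit value field (max 1023),
		 * so the loop below divides by 2^5 per scale step:
		 * scale = 2, value = 169, i.e. roughly 169 * 2^10 ns = 173 us.
		 */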
   16045 		lat_ns = ((int64_t)rxa * 1024 -
   16046 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16047 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16048 		if (lat_ns < 0)
   16049 			lat_ns = 0;
   16050 		else
   16051 			lat_ns /= speed;
   16052 		value = lat_ns;
   16053 
   16054 		while (value > LTRV_VALUE) {
16055 			scale++;
   16056 			value = howmany(value, __BIT(5));
   16057 		}
   16058 		if (scale > LTRV_SCALE_MAX) {
   16059 			printf("%s: Invalid LTR latency scale %d\n",
   16060 			    device_xname(sc->sc_dev), scale);
   16061 			return -1;
   16062 		}
   16063 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16064 
   16065 		/* Determine the maximum latency tolerated by the platform */
   16066 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16067 		    WM_PCI_LTR_CAP_LPT);
   16068 		max_snoop = preg & 0xffff;
   16069 		max_nosnoop = preg >> 16;
   16070 
   16071 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16072 
   16073 		if (lat_enc > max_ltr_enc) {
   16074 			lat_enc = max_ltr_enc;
   16075 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16076 			    * PCI_LTR_SCALETONS(
   16077 				    __SHIFTOUT(lat_enc,
   16078 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16079 		}
   16080 
   16081 		if (lat_ns) {
   16082 			lat_ns *= speed * 1000;
   16083 			lat_ns /= 8;
   16084 			lat_ns /= 1000000000;
   16085 			obff_hwm = (int32_t)(rxa - lat_ns);
   16086 		}
   16087 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
16088 			device_printf(sc->sc_dev, "Invalid high water mark %d "
16089 			    "(rxa = %d, lat_ns = %d)\n",
   16090 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16091 			return -1;
   16092 		}
   16093 	}
16094 	/* Snoop and No-Snoop latencies are set to the same value */
   16095 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16096 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16097 
   16098 	/* Set OBFF high water mark */
   16099 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16100 	reg |= obff_hwm;
   16101 	CSR_WRITE(sc, WMREG_SVT, reg);
   16102 
   16103 	/* Enable OBFF */
   16104 	reg = CSR_READ(sc, WMREG_SVCR);
   16105 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16106 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16107 
   16108 	return 0;
   16109 }
   16110 
   16111 /*
   16112  * I210 Errata 25 and I211 Errata 10
   16113  * Slow System Clock.
   16114  */
   16115 static int
   16116 wm_pll_workaround_i210(struct wm_softc *sc)
   16117 {
   16118 	uint32_t mdicnfg, wuc;
   16119 	uint32_t reg;
   16120 	pcireg_t pcireg;
   16121 	uint32_t pmreg;
   16122 	uint16_t nvmword, tmp_nvmword;
   16123 	uint16_t phyval;
   16124 	bool wa_done = false;
   16125 	int i, rv = 0;
   16126 
   16127 	/* Get Power Management cap offset */
   16128 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16129 	    &pmreg, NULL) == 0)
   16130 		return -1;
   16131 
   16132 	/* Save WUC and MDICNFG registers */
   16133 	wuc = CSR_READ(sc, WMREG_WUC);
   16134 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16135 
   16136 	reg = mdicnfg & ~MDICNFG_DEST;
   16137 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16138 
   16139 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16140 		nvmword = INVM_DEFAULT_AL;
   16141 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16142 
   16143 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16144 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16145 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16146 
   16147 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16148 			rv = 0;
   16149 			break; /* OK */
   16150 		} else
   16151 			rv = -1;
   16152 
   16153 		wa_done = true;
   16154 		/* Directly reset the internal PHY */
   16155 		reg = CSR_READ(sc, WMREG_CTRL);
   16156 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16157 
   16158 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16159 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16160 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16161 
   16162 		CSR_WRITE(sc, WMREG_WUC, 0);
   16163 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16164 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16165 
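		/*
		 * Bounce the device through D3hot and back to D0 so that the
		 * autoload runs again with the workaround value written to
		 * EEARBC_I210 above.
		 */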
   16166 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16167 		    pmreg + PCI_PMCSR);
   16168 		pcireg |= PCI_PMCSR_STATE_D3;
   16169 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16170 		    pmreg + PCI_PMCSR, pcireg);
   16171 		delay(1000);
   16172 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16173 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16174 		    pmreg + PCI_PMCSR, pcireg);
   16175 
   16176 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16177 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16178 
   16179 		/* Restore WUC register */
   16180 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16181 	}
   16182 
   16183 	/* Restore MDICNFG setting */
   16184 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16185 	if (wa_done)
   16186 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16187 	return rv;
   16188 }
   16189 
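/*
 * Work around lost legacy (INTx) interrupts on PCH_SPT/PCH_CNP, apparently
 * by keeping the side clock ungated and disabling IOSF-SB clock gating and
 * clock requests.
 */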
   16190 static void
   16191 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16192 {
   16193 	uint32_t reg;
   16194 
   16195 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16196 		device_xname(sc->sc_dev), __func__));
   16197 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16198 	    || (sc->sc_type == WM_T_PCH_CNP));
   16199 
   16200 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16201 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16202 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16203 
   16204 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16205 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16206 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16207 }
   16208