/*	$NetBSD: if_wm.c,v 1.760 2022/08/12 10:57:06 riastradh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.760 2022/08/12 10:57:06 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	(WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK)
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	(WQ_PERCPU | WQ_MPSAFE)
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on chips earlier than the
 * 82544, but we use 4096 on the 82544 and later.  We tell the upper
 * layers that they can queue a lot of packets, and we go ahead and
 * manage up to 64 (16 for the i82547) of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64
 * DMA segments, m_defrag() is called to reduce them.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
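
/*
 * Illustrative example (not used by the driver): because txq_num and
 * txq_ndesc must be powers of two, the "& mask" above implements cheap
 * modular wrap-around.  With txq_ndesc = 4096, WM_NTXDESC_MASK(txq) is
 * 0xfff, so WM_NEXTTX(txq, 4095) == ((4095 + 1) & 0xfff) == 0.
 */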

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
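
/*
 * Worked example for the sizing comment above: with 2KB (MCLBYTES)
 * buffers, a ~9KB jumbo frame needs ceil(9018 / 2048) = 5 descriptors,
 * and 256 / 5 = 51 frames, i.e. the "room for 50 jumbo packets"
 * cited above.
 */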

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
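
/*
 * Expansion sketch (illustrative): WM_Q_EVCNT_DEFINE(txq, txdw) in a
 * structure declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname) formats the name
 * "txq00txdw" into that buffer and registers the counter with
 * evcnt_attach_dynamic(9).
 */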

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking; see the
	 * illustrative sketch following this structure definition.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};
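
/*
 * Illustrative sketch of the intermediate pcq(9) queue mentioned above
 * (simplified from wm_transmit()/wm_deferred_start_locked(); error
 * handling, counters and variable declarations omitted):
 */
#if 0
	/* Sender side: any CPU may enqueue without holding txq_lock. */
	if (!pcq_put(txq->txq_interq, m))
		m_freem(m);		/* queue full: drop the packet */

	/* Consumer side: drain the queue while holding txq_lock. */
	while ((m = pcq_get(txq->txq_interq)) != NULL) {
		/* ... load m into Tx descriptors ... */
	}
#endif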

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only;
					 * MSI-X uses sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_sec;		/* Sequence Error */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */

#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
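
/*
 * Example of the tail-pointer idiom above (illustrative): rxq_tailp
 * always points at the m_next field of the last mbuf (or at rxq_head
 * when the chain is empty), so appending is O(1):
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, rxq_tail == m2
 */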

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
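
/*
 * Note (illustrative): CSR_WRITE_FLUSH() forces posted PCI writes out
 * to the chip by issuing a harmless read of the STATUS register, e.g.:
 *
 *	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
 *	CSR_WRITE_FLUSH(sc);	(the write has reached the device here)
 */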

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
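
/*
 * Worked example (illustrative): the descriptor base registers take a
 * DMA address as two 32-bit halves.  If WM_CDTXADDR() yields
 * 0x123456780 on a 64-bit bus_addr_t, then WM_CDTXADDR_LO() is
 * 0x23456780 and WM_CDTXADDR_HI() is 0x1; with a 32-bit bus_addr_t
 * the high half is always 0.
 */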

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
   1304 	  WM_T_82547_2,		WMP_F_COPPER },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1307 	  "Intel PRO/1000 PT (82571EB)",
   1308 	  WM_T_82571,		WMP_F_COPPER },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1311 	  "Intel PRO/1000 PF (82571EB)",
   1312 	  WM_T_82571,		WMP_F_FIBER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1315 	  "Intel PRO/1000 PB (82571EB)",
   1316 	  WM_T_82571,		WMP_F_SERDES },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1319 	  "Intel PRO/1000 QT (82571EB)",
   1320 	  WM_T_82571,		WMP_F_COPPER },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1323 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1324 	  WM_T_82571,		WMP_F_COPPER },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1327 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1328 	  WM_T_82571,		WMP_F_COPPER },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1331 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1332 	  WM_T_82571,		WMP_F_SERDES },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1335 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1336 	  WM_T_82571,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1339 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1340 	  WM_T_82571,		WMP_F_FIBER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1343 	  "Intel i82572EI 1000baseT Ethernet",
   1344 	  WM_T_82572,		WMP_F_COPPER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1347 	  "Intel i82572EI 1000baseX Ethernet",
   1348 	  WM_T_82572,		WMP_F_FIBER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1351 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1352 	  WM_T_82572,		WMP_F_SERDES },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1355 	  "Intel i82572EI 1000baseT Ethernet",
   1356 	  WM_T_82572,		WMP_F_COPPER },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1359 	  "Intel i82573E",
   1360 	  WM_T_82573,		WMP_F_COPPER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1363 	  "Intel i82573E IAMT",
   1364 	  WM_T_82573,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1367 	  "Intel i82573L Gigabit Ethernet",
   1368 	  WM_T_82573,		WMP_F_COPPER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1371 	  "Intel i82574L",
   1372 	  WM_T_82574,		WMP_F_COPPER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1375 	  "Intel i82574L",
   1376 	  WM_T_82574,		WMP_F_COPPER },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1379 	  "Intel i82583V",
   1380 	  WM_T_82583,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1383 	  "i80003 dual 1000baseT Ethernet",
   1384 	  WM_T_80003,		WMP_F_COPPER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1387 	  "i80003 dual 1000baseX Ethernet",
   1388 	  WM_T_80003,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1391 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1392 	  WM_T_80003,		WMP_F_SERDES },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1395 	  "Intel i80003 1000baseT Ethernet",
   1396 	  WM_T_80003,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1399 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1400 	  WM_T_80003,		WMP_F_SERDES },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1403 	  "Intel i82801H (M_AMT) LAN Controller",
   1404 	  WM_T_ICH8,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1406 	  "Intel i82801H (AMT) LAN Controller",
   1407 	  WM_T_ICH8,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1409 	  "Intel i82801H LAN Controller",
   1410 	  WM_T_ICH8,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1412 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1413 	  WM_T_ICH8,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1415 	  "Intel i82801H (M) LAN Controller",
   1416 	  WM_T_ICH8,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1418 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1419 	  WM_T_ICH8,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1421 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1422 	  WM_T_ICH8,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1424 	  "82567V-3 LAN Controller",
   1425 	  WM_T_ICH8,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1427 	  "82801I (AMT) LAN Controller",
   1428 	  WM_T_ICH9,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1430 	  "82801I 10/100 LAN Controller",
   1431 	  WM_T_ICH9,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1433 	  "82801I (G) 10/100 LAN Controller",
   1434 	  WM_T_ICH9,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1436 	  "82801I (GT) 10/100 LAN Controller",
   1437 	  WM_T_ICH9,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1439 	  "82801I (C) LAN Controller",
   1440 	  WM_T_ICH9,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1442 	  "82801I mobile LAN Controller",
   1443 	  WM_T_ICH9,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1445 	  "82801I mobile (V) LAN Controller",
   1446 	  WM_T_ICH9,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1448 	  "82801I mobile (AMT) LAN Controller",
   1449 	  WM_T_ICH9,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1451 	  "82567LM-4 LAN Controller",
   1452 	  WM_T_ICH9,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1454 	  "82567LM-2 LAN Controller",
   1455 	  WM_T_ICH10,		WMP_F_COPPER },
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1457 	  "82567LF-2 LAN Controller",
   1458 	  WM_T_ICH10,		WMP_F_COPPER },
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1460 	  "82567LM-3 LAN Controller",
   1461 	  WM_T_ICH10,		WMP_F_COPPER },
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1463 	  "82567LF-3 LAN Controller",
   1464 	  WM_T_ICH10,		WMP_F_COPPER },
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1466 	  "82567V-2 LAN Controller",
   1467 	  WM_T_ICH10,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1469 	  "82567V-3? LAN Controller",
   1470 	  WM_T_ICH10,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1472 	  "HANKSVILLE LAN Controller",
   1473 	  WM_T_ICH10,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1475 	  "PCH LAN (82577LM) Controller",
   1476 	  WM_T_PCH,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1478 	  "PCH LAN (82577LC) Controller",
   1479 	  WM_T_PCH,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1481 	  "PCH LAN (82578DM) Controller",
   1482 	  WM_T_PCH,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1484 	  "PCH LAN (82578DC) Controller",
   1485 	  WM_T_PCH,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1487 	  "PCH2 LAN (82579LM) Controller",
   1488 	  WM_T_PCH2,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1490 	  "PCH2 LAN (82579V) Controller",
   1491 	  WM_T_PCH2,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1493 	  "82575EB dual-1000baseT Ethernet",
   1494 	  WM_T_82575,		WMP_F_COPPER },
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1496 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1497 	  WM_T_82575,		WMP_F_SERDES },
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1499 	  "82575GB quad-1000baseT Ethernet",
   1500 	  WM_T_82575,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1502 	  "82575GB quad-1000baseT Ethernet (PM)",
   1503 	  WM_T_82575,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1505 	  "82576 1000BaseT Ethernet",
   1506 	  WM_T_82576,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1508 	  "82576 1000BaseX Ethernet",
   1509 	  WM_T_82576,		WMP_F_FIBER },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1512 	  "82576 gigabit Ethernet (SERDES)",
   1513 	  WM_T_82576,		WMP_F_SERDES },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1516 	  "82576 quad-1000BaseT Ethernet",
   1517 	  WM_T_82576,		WMP_F_COPPER },
   1518 
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1520 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1521 	  WM_T_82576,		WMP_F_COPPER },
   1522 
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1524 	  "82576 gigabit Ethernet",
   1525 	  WM_T_82576,		WMP_F_COPPER },
   1526 
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1528 	  "82576 gigabit Ethernet (SERDES)",
   1529 	  WM_T_82576,		WMP_F_SERDES },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1531 	  "82576 quad-gigabit Ethernet (SERDES)",
   1532 	  WM_T_82576,		WMP_F_SERDES },
   1533 
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1535 	  "82580 1000BaseT Ethernet",
   1536 	  WM_T_82580,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1538 	  "82580 1000BaseX Ethernet",
   1539 	  WM_T_82580,		WMP_F_FIBER },
   1540 
   1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1542 	  "82580 1000BaseT Ethernet (SERDES)",
   1543 	  WM_T_82580,		WMP_F_SERDES },
   1544 
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1546 	  "82580 gigabit Ethernet (SGMII)",
   1547 	  WM_T_82580,		WMP_F_COPPER },
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1549 	  "82580 dual-1000BaseT Ethernet",
   1550 	  WM_T_82580,		WMP_F_COPPER },
   1551 
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1553 	  "82580 quad-1000BaseX Ethernet",
   1554 	  WM_T_82580,		WMP_F_FIBER },
   1555 
   1556 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1557 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1558 	  WM_T_82580,		WMP_F_COPPER },
   1559 
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1561 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1562 	  WM_T_82580,		WMP_F_SERDES },
   1563 
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1565 	  "DH89XXCC 1000BASE-KX Ethernet",
   1566 	  WM_T_82580,		WMP_F_SERDES },
   1567 
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1569 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1570 	  WM_T_82580,		WMP_F_SERDES },
   1571 
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1573 	  "I350 Gigabit Network Connection",
   1574 	  WM_T_I350,		WMP_F_COPPER },
   1575 
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1577 	  "I350 Gigabit Fiber Network Connection",
   1578 	  WM_T_I350,		WMP_F_FIBER },
   1579 
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1581 	  "I350 Gigabit Backplane Connection",
   1582 	  WM_T_I350,		WMP_F_SERDES },
   1583 
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1585 	  "I350 Quad Port Gigabit Ethernet",
   1586 	  WM_T_I350,		WMP_F_SERDES },
   1587 
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1589 	  "I350 Gigabit Connection",
   1590 	  WM_T_I350,		WMP_F_COPPER },
   1591 
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1593 	  "I354 Gigabit Ethernet (KX)",
   1594 	  WM_T_I354,		WMP_F_SERDES },
   1595 
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1597 	  "I354 Gigabit Ethernet (SGMII)",
   1598 	  WM_T_I354,		WMP_F_COPPER },
   1599 
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1601 	  "I354 Gigabit Ethernet (2.5G)",
   1602 	  WM_T_I354,		WMP_F_COPPER },
   1603 
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1605 	  "I210-T1 Ethernet Server Adapter",
   1606 	  WM_T_I210,		WMP_F_COPPER },
   1607 
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1609 	  "I210 Ethernet (Copper OEM)",
   1610 	  WM_T_I210,		WMP_F_COPPER },
   1611 
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1613 	  "I210 Ethernet (Copper IT)",
   1614 	  WM_T_I210,		WMP_F_COPPER },
   1615 
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1617 	  "I210 Ethernet (Copper, FLASH less)",
   1618 	  WM_T_I210,		WMP_F_COPPER },
   1619 
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1621 	  "I210 Gigabit Ethernet (Fiber)",
   1622 	  WM_T_I210,		WMP_F_FIBER },
   1623 
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1625 	  "I210 Gigabit Ethernet (SERDES)",
   1626 	  WM_T_I210,		WMP_F_SERDES },
   1627 
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1629 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1630 	  WM_T_I210,		WMP_F_SERDES },
   1631 
   1632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1633 	  "I210 Gigabit Ethernet (SGMII)",
   1634 	  WM_T_I210,		WMP_F_COPPER },
   1635 
   1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1637 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1638 	  WM_T_I210,		WMP_F_COPPER },
   1639 
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1641 	  "I211 Ethernet (COPPER)",
   1642 	  WM_T_I211,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1644 	  "I217 V Ethernet Connection",
   1645 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1647 	  "I217 LM Ethernet Connection",
   1648 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1650 	  "I218 V Ethernet Connection",
   1651 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1653 	  "I218 V Ethernet Connection",
   1654 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1656 	  "I218 V Ethernet Connection",
   1657 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1659 	  "I218 LM Ethernet Connection",
   1660 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1662 	  "I218 LM Ethernet Connection",
   1663 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1665 	  "I218 LM Ethernet Connection",
   1666 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1668 	  "I219 LM Ethernet Connection",
   1669 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1671 	  "I219 LM (2) Ethernet Connection",
   1672 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1674 	  "I219 LM (3) Ethernet Connection",
   1675 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1677 	  "I219 LM (4) Ethernet Connection",
   1678 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1680 	  "I219 LM (5) Ethernet Connection",
   1681 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1683 	  "I219 LM (6) Ethernet Connection",
   1684 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1686 	  "I219 LM (7) Ethernet Connection",
   1687 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1689 	  "I219 LM (8) Ethernet Connection",
   1690 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1692 	  "I219 LM (9) Ethernet Connection",
   1693 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1695 	  "I219 LM (10) Ethernet Connection",
   1696 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1698 	  "I219 LM (11) Ethernet Connection",
   1699 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1701 	  "I219 LM (12) Ethernet Connection",
   1702 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1704 	  "I219 LM (13) Ethernet Connection",
   1705 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1707 	  "I219 LM (14) Ethernet Connection",
   1708 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1710 	  "I219 LM (15) Ethernet Connection",
   1711 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1713 	  "I219 LM (16) Ethernet Connection",
   1714 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1715 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1716 	  "I219 LM (17) Ethernet Connection",
   1717 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1719 	  "I219 LM (18) Ethernet Connection",
   1720 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1722 	  "I219 LM (19) Ethernet Connection",
   1723 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1725 	  "I219 V Ethernet Connection",
   1726 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1727 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1728 	  "I219 V (2) Ethernet Connection",
   1729 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1731 	  "I219 V (4) Ethernet Connection",
   1732 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1734 	  "I219 V (5) Ethernet Connection",
   1735 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1737 	  "I219 V (6) Ethernet Connection",
   1738 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1739 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1740 	  "I219 V (7) Ethernet Connection",
   1741 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1743 	  "I219 V (8) Ethernet Connection",
   1744 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1746 	  "I219 V (9) Ethernet Connection",
   1747 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1749 	  "I219 V (10) Ethernet Connection",
   1750 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1751 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1752 	  "I219 V (11) Ethernet Connection",
   1753 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1755 	  "I219 V (12) Ethernet Connection",
   1756 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1758 	  "I219 V (13) Ethernet Connection",
   1759 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1761 	  "I219 V (14) Ethernet Connection",
   1762 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1763 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1764 	  "I219 V (15) Ethernet Connection",
   1765 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1766 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1767 	  "I219 V (16) Ethernet Connection",
   1768 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1770 	  "I219 V (17) Ethernet Connection",
   1771 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1772 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1773 	  "I219 V (18) Ethernet Connection",
   1774 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1775 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1776 	  "I219 V (19) Ethernet Connection",
   1777 	  WM_T_PCH_CNP,		WMP_F_COPPER },
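         	/* Sentinel; wm_lookup() stops at the entry with a NULL name. */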
   1778 	{ 0,			0,
   1779 	  NULL,
   1780 	  0,			0 },
   1781 };
   1782 
   1783 /*
   1784  * Register read/write functions.
   1785  * Other than CSR_{READ|WRITE}().
   1786  */
   1787 
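         /*
          * wm_io_read()/wm_io_write() below use the I/O-mapped indirect
          * access window: the target register's offset is written at I/O BAR
          * offset 0 and its value is then read or written at offset 4 (the
          * IOADDR/IODATA pair in Intel's documentation).
          */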
   1788 #if 0 /* Not currently used */
   1789 static inline uint32_t
   1790 wm_io_read(struct wm_softc *sc, int reg)
   1791 {
   1792 
   1793 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1794 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1795 }
   1796 #endif
   1797 
   1798 static inline void
   1799 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1800 {
   1801 
   1802 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1803 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1804 }
   1805 
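         /*
          * Write an 8-bit value through a serial control register (e.g.
          * WMREG_SCTL) and busy-wait, in 5us steps, until the chip reports
          * SCTL_CTL_READY.
          */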
   1806 static inline void
   1807 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1808     uint32_t data)
   1809 {
   1810 	uint32_t regval;
   1811 	int i;
   1812 
   1813 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1814 
   1815 	CSR_WRITE(sc, reg, regval);
   1816 
   1817 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1818 		delay(5);
   1819 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1820 			break;
   1821 	}
   1822 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1823 		aprint_error("%s: WARNING:"
   1824 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1825 		    device_xname(sc->sc_dev), reg);
   1826 	}
   1827 }
   1828 
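         /*
          * Split a 64-bit bus address into the two little-endian 32-bit
          * halves of a wiseman descriptor address field.
          */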
   1829 static inline void
   1830 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1831 {
   1832 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1833 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1834 }
   1835 
   1836 /*
   1837  * Descriptor sync/init functions.
   1838  */
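         /*
          * wm_cdtxsync() syncs descriptors [start, start + num); a range that
          * wraps past the end of the ring is synced in two pieces.
          */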
   1839 static inline void
   1840 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1841 {
   1842 	struct wm_softc *sc = txq->txq_sc;
   1843 
   1844 	/* If it will wrap around, sync to the end of the ring. */
   1845 	if ((start + num) > WM_NTXDESC(txq)) {
   1846 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1847 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1848 		    (WM_NTXDESC(txq) - start), ops);
   1849 		num -= (WM_NTXDESC(txq) - start);
   1850 		start = 0;
   1851 	}
   1852 
   1853 	/* Now sync whatever is left. */
   1854 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1855 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1856 }
   1857 
   1858 static inline void
   1859 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1860 {
   1861 	struct wm_softc *sc = rxq->rxq_sc;
   1862 
   1863 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1864 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1865 }
   1866 
   1867 static inline void
   1868 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1869 {
   1870 	struct wm_softc *sc = rxq->rxq_sc;
   1871 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1872 	struct mbuf *m = rxs->rxs_mbuf;
   1873 
   1874 	/*
   1875 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1876 	 * so that the payload after the Ethernet header is aligned
   1877 	 * to a 4-byte boundary.
    1878 	 *
   1879 	 * XXX BRAINDAMAGE ALERT!
   1880 	 * The stupid chip uses the same size for every buffer, which
   1881 	 * is set in the Receive Control register.  We are using the 2K
   1882 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1883 	 * reason, we can't "scoot" packets longer than the standard
   1884 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1885 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1886 	 * the upper layer copy the headers.
   1887 	 */
   1888 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1889 
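         	/*
         	 * The RX descriptor layout depends on the chip: the 82574 uses
         	 * extended descriptors, WM_F_NEWQUEUE chips (82575 and later)
         	 * use the "advanced" layout and everything else uses the legacy
         	 * wiseman layout.
         	 */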
   1890 	if (sc->sc_type == WM_T_82574) {
   1891 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1892 		rxd->erx_data.erxd_addr =
   1893 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1894 		rxd->erx_data.erxd_dd = 0;
   1895 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1896 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1897 
   1898 		rxd->nqrx_data.nrxd_paddr =
   1899 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1900 		/* Currently, split header is not supported. */
   1901 		rxd->nqrx_data.nrxd_haddr = 0;
   1902 	} else {
   1903 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1904 
   1905 		wm_set_dma_addr(&rxd->wrx_addr,
   1906 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1907 		rxd->wrx_len = 0;
   1908 		rxd->wrx_cksum = 0;
   1909 		rxd->wrx_status = 0;
   1910 		rxd->wrx_errors = 0;
   1911 		rxd->wrx_special = 0;
   1912 	}
   1913 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1914 
   1915 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1916 }
   1917 
   1918 /*
   1919  * Device driver interface functions and commonly used functions.
   1920  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1921  */
   1922 
   1923 /* Lookup supported device table */
   1924 static const struct wm_product *
   1925 wm_lookup(const struct pci_attach_args *pa)
   1926 {
   1927 	const struct wm_product *wmp;
   1928 
   1929 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1930 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1931 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1932 			return wmp;
   1933 	}
   1934 	return NULL;
   1935 }
   1936 
   1937 /* The match function (ca_match) */
   1938 static int
   1939 wm_match(device_t parent, cfdata_t cf, void *aux)
   1940 {
   1941 	struct pci_attach_args *pa = aux;
   1942 
   1943 	if (wm_lookup(pa) != NULL)
   1944 		return 1;
   1945 
   1946 	return 0;
   1947 }
   1948 
   1949 /* The attach function (ca_attach) */
   1950 static void
   1951 wm_attach(device_t parent, device_t self, void *aux)
   1952 {
   1953 	struct wm_softc *sc = device_private(self);
   1954 	struct pci_attach_args *pa = aux;
   1955 	prop_dictionary_t dict;
   1956 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1957 	pci_chipset_tag_t pc = pa->pa_pc;
   1958 	int counts[PCI_INTR_TYPE_SIZE];
   1959 	pci_intr_type_t max_type;
   1960 	const char *eetype, *xname;
   1961 	bus_space_tag_t memt;
   1962 	bus_space_handle_t memh;
   1963 	bus_size_t memsize;
   1964 	int memh_valid;
   1965 	int i, error;
   1966 	const struct wm_product *wmp;
   1967 	prop_data_t ea;
   1968 	prop_number_t pn;
   1969 	uint8_t enaddr[ETHER_ADDR_LEN];
   1970 	char buf[256];
   1971 	char wqname[MAXCOMLEN];
   1972 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1973 	pcireg_t preg, memtype;
   1974 	uint16_t eeprom_data, apme_mask;
   1975 	bool force_clear_smbi;
   1976 	uint32_t link_mode;
   1977 	uint32_t reg;
   1978 
   1979 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1980 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1981 #endif
   1982 	sc->sc_dev = self;
   1983 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1984 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1985 	sc->sc_core_stopping = false;
   1986 
   1987 	wmp = wm_lookup(pa);
   1988 #ifdef DIAGNOSTIC
   1989 	if (wmp == NULL) {
   1990 		printf("\n");
   1991 		panic("wm_attach: impossible");
   1992 	}
   1993 #endif
   1994 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1995 
   1996 	sc->sc_pc = pa->pa_pc;
   1997 	sc->sc_pcitag = pa->pa_tag;
   1998 
   1999 	if (pci_dma64_available(pa)) {
   2000 		aprint_verbose(", 64-bit DMA");
   2001 		sc->sc_dmat = pa->pa_dmat64;
   2002 	} else {
   2003 		aprint_verbose(", 32-bit DMA");
   2004 		sc->sc_dmat = pa->pa_dmat;
   2005 	}
   2006 
   2007 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   2008 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   2009 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   2010 
   2011 	sc->sc_type = wmp->wmp_type;
   2012 
   2013 	/* Set default function pointers */
   2014 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   2015 	sc->phy.release = sc->nvm.release = wm_put_null;
   2016 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   2017 
   2018 	if (sc->sc_type < WM_T_82543) {
   2019 		if (sc->sc_rev < 2) {
   2020 			aprint_error_dev(sc->sc_dev,
   2021 			    "i82542 must be at least rev. 2\n");
   2022 			return;
   2023 		}
   2024 		if (sc->sc_rev < 3)
   2025 			sc->sc_type = WM_T_82542_2_0;
   2026 	}
   2027 
   2028 	/*
   2029 	 * Disable MSI for Errata:
   2030 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   2031 	 *
   2032 	 *  82544: Errata 25
   2033 	 *  82540: Errata  6 (easy to reproduce device timeout)
   2034 	 *  82545: Errata  4 (easy to reproduce device timeout)
   2035 	 *  82546: Errata 26 (easy to reproduce device timeout)
   2036 	 *  82541: Errata  7 (easy to reproduce device timeout)
   2037 	 *
   2038 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   2039 	 *
   2040 	 *  82571 & 82572: Errata 63
   2041 	 */
   2042 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   2043 	    || (sc->sc_type == WM_T_82572))
   2044 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   2045 
   2046 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2047 	    || (sc->sc_type == WM_T_82580)
   2048 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2049 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2050 		sc->sc_flags |= WM_F_NEWQUEUE;
   2051 
   2052 	/* Set device properties (mactype) */
   2053 	dict = device_properties(sc->sc_dev);
   2054 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   2055 
   2056 	/*
    2057 	 * Map the device.  All devices support memory-mapped access,
   2058 	 * and it is really required for normal operation.
   2059 	 */
   2060 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   2061 	switch (memtype) {
   2062 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2063 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2064 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   2065 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   2066 		break;
   2067 	default:
   2068 		memh_valid = 0;
   2069 		break;
   2070 	}
   2071 
   2072 	if (memh_valid) {
   2073 		sc->sc_st = memt;
   2074 		sc->sc_sh = memh;
   2075 		sc->sc_ss = memsize;
   2076 	} else {
   2077 		aprint_error_dev(sc->sc_dev,
   2078 		    "unable to map device registers\n");
   2079 		return;
   2080 	}
   2081 
   2082 	/*
   2083 	 * In addition, i82544 and later support I/O mapped indirect
   2084 	 * register access.  It is not desirable (nor supported in
   2085 	 * this driver) to use it for normal operation, though it is
   2086 	 * required to work around bugs in some chip versions.
   2087 	 */
   2088 	switch (sc->sc_type) {
   2089 	case WM_T_82544:
   2090 	case WM_T_82541:
   2091 	case WM_T_82541_2:
   2092 	case WM_T_82547:
   2093 	case WM_T_82547_2:
   2094 		/* First we have to find the I/O BAR. */
   2095 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2096 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2097 			if (memtype == PCI_MAPREG_TYPE_IO)
   2098 				break;
   2099 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2100 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2101 				i += 4;	/* skip high bits, too */
   2102 		}
   2103 		if (i < PCI_MAPREG_END) {
   2104 			/*
    2105 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2106 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    2107 			 * that's not a problem, because the newer chips
    2108 			 * don't have this bug.
    2109 			 *
    2110 			 * The i8254x apparently doesn't respond when the
    2111 			 * I/O BAR is 0, which looks as if it hasn't been
    2112 			 * configured.
   2113 			 */
   2114 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2115 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2116 				aprint_error_dev(sc->sc_dev,
   2117 				    "WARNING: I/O BAR at zero.\n");
   2118 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2119 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
   2120 			    == 0) {
   2121 				sc->sc_flags |= WM_F_IOH_VALID;
   2122 			} else
   2123 				aprint_error_dev(sc->sc_dev,
   2124 				    "WARNING: unable to map I/O space\n");
   2125 		}
   2126 		break;
   2127 	default:
   2128 		break;
   2129 	}
   2130 
   2131 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2132 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2133 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2134 	if (sc->sc_type < WM_T_82542_2_1)
   2135 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2136 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2137 
   2138 	/* Power up chip */
   2139 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2140 	    && error != EOPNOTSUPP) {
   2141 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2142 		return;
   2143 	}
   2144 
   2145 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2146 	/*
    2147 	 *  To save interrupt resources, don't use MSI-X if we can use
    2148 	 * only one queue.
   2149 	 */
   2150 	if (sc->sc_nqueues > 1) {
   2151 		max_type = PCI_INTR_TYPE_MSIX;
   2152 		/*
    2153 		 *  The 82583 has an MSI-X capability in its PCI configuration
    2154 		 * space but doesn't actually support it; at least the
    2155 		 * documentation doesn't say anything about MSI-X.
   2156 		 */
   2157 		counts[PCI_INTR_TYPE_MSIX]
   2158 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2159 	} else {
   2160 		max_type = PCI_INTR_TYPE_MSI;
   2161 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2162 	}
   2163 
   2164 	/* Allocation settings */
   2165 	counts[PCI_INTR_TYPE_MSI] = 1;
   2166 	counts[PCI_INTR_TYPE_INTX] = 1;
   2167 	/* overridden by disable flags */
   2168 	if (wm_disable_msi != 0) {
   2169 		counts[PCI_INTR_TYPE_MSI] = 0;
   2170 		if (wm_disable_msix != 0) {
   2171 			max_type = PCI_INTR_TYPE_INTX;
   2172 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2173 		}
   2174 	} else if (wm_disable_msix != 0) {
   2175 		max_type = PCI_INTR_TYPE_MSI;
   2176 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2177 	}
   2178 
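         	/*
         	 * Interrupt allocation falls back step by step: if MSI-X setup
         	 * fails, release the vectors and retry with MSI; if MSI setup
         	 * fails, retry with INTx.
         	 */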
   2179 alloc_retry:
   2180 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2181 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2182 		return;
   2183 	}
   2184 
   2185 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2186 		error = wm_setup_msix(sc);
   2187 		if (error) {
   2188 			pci_intr_release(pc, sc->sc_intrs,
   2189 			    counts[PCI_INTR_TYPE_MSIX]);
   2190 
   2191 			/* Setup for MSI: Disable MSI-X */
   2192 			max_type = PCI_INTR_TYPE_MSI;
   2193 			counts[PCI_INTR_TYPE_MSI] = 1;
   2194 			counts[PCI_INTR_TYPE_INTX] = 1;
   2195 			goto alloc_retry;
   2196 		}
   2197 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2198 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2199 		error = wm_setup_legacy(sc);
   2200 		if (error) {
   2201 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2202 			    counts[PCI_INTR_TYPE_MSI]);
   2203 
   2204 			/* The next try is for INTx: Disable MSI */
   2205 			max_type = PCI_INTR_TYPE_INTX;
   2206 			counts[PCI_INTR_TYPE_INTX] = 1;
   2207 			goto alloc_retry;
   2208 		}
   2209 	} else {
   2210 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2211 		error = wm_setup_legacy(sc);
   2212 		if (error) {
   2213 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2214 			    counts[PCI_INTR_TYPE_INTX]);
   2215 			return;
   2216 		}
   2217 	}
   2218 
   2219 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2220 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2221 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2222 	    WM_WORKQUEUE_FLAGS);
   2223 	if (error) {
   2224 		aprint_error_dev(sc->sc_dev,
   2225 		    "unable to create workqueue\n");
   2226 		goto out;
   2227 	}
   2228 
   2229 	/*
   2230 	 * Check the function ID (unit number of the chip).
   2231 	 */
   2232 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2233 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2234 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2235 	    || (sc->sc_type == WM_T_82580)
   2236 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2237 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2238 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2239 	else
   2240 		sc->sc_funcid = 0;
   2241 
   2242 	/*
   2243 	 * Determine a few things about the bus we're connected to.
   2244 	 */
   2245 	if (sc->sc_type < WM_T_82543) {
   2246 		/* We don't really know the bus characteristics here. */
   2247 		sc->sc_bus_speed = 33;
   2248 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2249 		/*
    2250 		 * CSA (Communication Streaming Architecture) is about as fast
    2251 		 * as a 32-bit 66MHz PCI bus.
   2252 		 */
   2253 		sc->sc_flags |= WM_F_CSA;
   2254 		sc->sc_bus_speed = 66;
   2255 		aprint_verbose_dev(sc->sc_dev,
   2256 		    "Communication Streaming Architecture\n");
   2257 		if (sc->sc_type == WM_T_82547) {
   2258 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2259 			callout_setfunc(&sc->sc_txfifo_ch,
   2260 			    wm_82547_txfifo_stall, sc);
   2261 			aprint_verbose_dev(sc->sc_dev,
   2262 			    "using 82547 Tx FIFO stall work-around\n");
   2263 		}
   2264 	} else if (sc->sc_type >= WM_T_82571) {
   2265 		sc->sc_flags |= WM_F_PCIE;
   2266 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2267 		    && (sc->sc_type != WM_T_ICH10)
   2268 		    && (sc->sc_type != WM_T_PCH)
   2269 		    && (sc->sc_type != WM_T_PCH2)
   2270 		    && (sc->sc_type != WM_T_PCH_LPT)
   2271 		    && (sc->sc_type != WM_T_PCH_SPT)
   2272 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2273 			/* ICH* and PCH* have no PCIe capability registers */
   2274 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2275 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2276 				NULL) == 0)
   2277 				aprint_error_dev(sc->sc_dev,
   2278 				    "unable to find PCIe capability\n");
   2279 		}
   2280 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2281 	} else {
   2282 		reg = CSR_READ(sc, WMREG_STATUS);
   2283 		if (reg & STATUS_BUS64)
   2284 			sc->sc_flags |= WM_F_BUS64;
   2285 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2286 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2287 
   2288 			sc->sc_flags |= WM_F_PCIX;
   2289 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2290 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2291 				aprint_error_dev(sc->sc_dev,
   2292 				    "unable to find PCIX capability\n");
   2293 			else if (sc->sc_type != WM_T_82545_3 &&
   2294 				 sc->sc_type != WM_T_82546_3) {
   2295 				/*
   2296 				 * Work around a problem caused by the BIOS
   2297 				 * setting the max memory read byte count
   2298 				 * incorrectly.
   2299 				 */
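         				/*
         				 * Both fields encode 512 << n bytes;
         				 * clamp the commanded MMRBC to the
         				 * device's maximum.
         				 */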
   2300 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2301 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2302 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2303 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2304 
   2305 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2306 				    PCIX_CMD_BYTECNT_SHIFT;
   2307 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2308 				    PCIX_STATUS_MAXB_SHIFT;
   2309 				if (bytecnt > maxb) {
   2310 					aprint_verbose_dev(sc->sc_dev,
   2311 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2312 					    512 << bytecnt, 512 << maxb);
   2313 					pcix_cmd = (pcix_cmd &
   2314 					    ~PCIX_CMD_BYTECNT_MASK) |
   2315 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2316 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2317 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2318 					    pcix_cmd);
   2319 				}
   2320 			}
   2321 		}
   2322 		/*
   2323 		 * The quad port adapter is special; it has a PCIX-PCIX
   2324 		 * bridge on the board, and can run the secondary bus at
   2325 		 * a higher speed.
   2326 		 */
   2327 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2328 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2329 								      : 66;
   2330 		} else if (sc->sc_flags & WM_F_PCIX) {
   2331 			switch (reg & STATUS_PCIXSPD_MASK) {
   2332 			case STATUS_PCIXSPD_50_66:
   2333 				sc->sc_bus_speed = 66;
   2334 				break;
   2335 			case STATUS_PCIXSPD_66_100:
   2336 				sc->sc_bus_speed = 100;
   2337 				break;
   2338 			case STATUS_PCIXSPD_100_133:
   2339 				sc->sc_bus_speed = 133;
   2340 				break;
   2341 			default:
   2342 				aprint_error_dev(sc->sc_dev,
   2343 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2344 				    reg & STATUS_PCIXSPD_MASK);
   2345 				sc->sc_bus_speed = 66;
   2346 				break;
   2347 			}
   2348 		} else
   2349 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2350 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2351 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2352 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2353 	}
   2354 
   2355 	/* clear interesting stat counters */
   2356 	CSR_READ(sc, WMREG_COLC);
   2357 	CSR_READ(sc, WMREG_RXERRC);
   2358 
   2359 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2360 	    || (sc->sc_type >= WM_T_ICH8))
   2361 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2362 	if (sc->sc_type >= WM_T_ICH8)
   2363 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2364 
   2365 	/* Set PHY, NVM mutex related stuff */
   2366 	switch (sc->sc_type) {
   2367 	case WM_T_82542_2_0:
   2368 	case WM_T_82542_2_1:
   2369 	case WM_T_82543:
   2370 	case WM_T_82544:
   2371 		/* Microwire */
   2372 		sc->nvm.read = wm_nvm_read_uwire;
   2373 		sc->sc_nvm_wordsize = 64;
   2374 		sc->sc_nvm_addrbits = 6;
   2375 		break;
   2376 	case WM_T_82540:
   2377 	case WM_T_82545:
   2378 	case WM_T_82545_3:
   2379 	case WM_T_82546:
   2380 	case WM_T_82546_3:
   2381 		/* Microwire */
   2382 		sc->nvm.read = wm_nvm_read_uwire;
   2383 		reg = CSR_READ(sc, WMREG_EECD);
   2384 		if (reg & EECD_EE_SIZE) {
   2385 			sc->sc_nvm_wordsize = 256;
   2386 			sc->sc_nvm_addrbits = 8;
   2387 		} else {
   2388 			sc->sc_nvm_wordsize = 64;
   2389 			sc->sc_nvm_addrbits = 6;
   2390 		}
   2391 		sc->sc_flags |= WM_F_LOCK_EECD;
   2392 		sc->nvm.acquire = wm_get_eecd;
   2393 		sc->nvm.release = wm_put_eecd;
   2394 		break;
   2395 	case WM_T_82541:
   2396 	case WM_T_82541_2:
   2397 	case WM_T_82547:
   2398 	case WM_T_82547_2:
   2399 		reg = CSR_READ(sc, WMREG_EECD);
   2400 		/*
    2401 		 * wm_nvm_set_addrbits_size_eecd() itself accesses the SPI
    2402 		 * only on the 8254[17], so set the flags and functions first.
   2403 		 */
   2404 		sc->sc_flags |= WM_F_LOCK_EECD;
   2405 		sc->nvm.acquire = wm_get_eecd;
   2406 		sc->nvm.release = wm_put_eecd;
   2407 		if (reg & EECD_EE_TYPE) {
   2408 			/* SPI */
   2409 			sc->nvm.read = wm_nvm_read_spi;
   2410 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2411 			wm_nvm_set_addrbits_size_eecd(sc);
   2412 		} else {
   2413 			/* Microwire */
   2414 			sc->nvm.read = wm_nvm_read_uwire;
   2415 			if ((reg & EECD_EE_ABITS) != 0) {
   2416 				sc->sc_nvm_wordsize = 256;
   2417 				sc->sc_nvm_addrbits = 8;
   2418 			} else {
   2419 				sc->sc_nvm_wordsize = 64;
   2420 				sc->sc_nvm_addrbits = 6;
   2421 			}
   2422 		}
   2423 		break;
   2424 	case WM_T_82571:
   2425 	case WM_T_82572:
   2426 		/* SPI */
   2427 		sc->nvm.read = wm_nvm_read_eerd;
    2428 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2429 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2430 		wm_nvm_set_addrbits_size_eecd(sc);
   2431 		sc->phy.acquire = wm_get_swsm_semaphore;
   2432 		sc->phy.release = wm_put_swsm_semaphore;
   2433 		sc->nvm.acquire = wm_get_nvm_82571;
   2434 		sc->nvm.release = wm_put_nvm_82571;
   2435 		break;
   2436 	case WM_T_82573:
   2437 	case WM_T_82574:
   2438 	case WM_T_82583:
   2439 		sc->nvm.read = wm_nvm_read_eerd;
    2440 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2441 		if (sc->sc_type == WM_T_82573) {
   2442 			sc->phy.acquire = wm_get_swsm_semaphore;
   2443 			sc->phy.release = wm_put_swsm_semaphore;
   2444 			sc->nvm.acquire = wm_get_nvm_82571;
   2445 			sc->nvm.release = wm_put_nvm_82571;
   2446 		} else {
   2447 			/* Both PHY and NVM use the same semaphore. */
   2448 			sc->phy.acquire = sc->nvm.acquire
   2449 			    = wm_get_swfwhw_semaphore;
   2450 			sc->phy.release = sc->nvm.release
   2451 			    = wm_put_swfwhw_semaphore;
   2452 		}
   2453 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2454 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2455 			sc->sc_nvm_wordsize = 2048;
   2456 		} else {
   2457 			/* SPI */
   2458 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2459 			wm_nvm_set_addrbits_size_eecd(sc);
   2460 		}
   2461 		break;
   2462 	case WM_T_82575:
   2463 	case WM_T_82576:
   2464 	case WM_T_82580:
   2465 	case WM_T_I350:
   2466 	case WM_T_I354:
   2467 	case WM_T_80003:
   2468 		/* SPI */
   2469 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2470 		wm_nvm_set_addrbits_size_eecd(sc);
   2471 		if ((sc->sc_type == WM_T_80003)
   2472 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2473 			sc->nvm.read = wm_nvm_read_eerd;
   2474 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2475 		} else {
   2476 			sc->nvm.read = wm_nvm_read_spi;
   2477 			sc->sc_flags |= WM_F_LOCK_EECD;
   2478 		}
   2479 		sc->phy.acquire = wm_get_phy_82575;
   2480 		sc->phy.release = wm_put_phy_82575;
   2481 		sc->nvm.acquire = wm_get_nvm_80003;
   2482 		sc->nvm.release = wm_put_nvm_80003;
   2483 		break;
   2484 	case WM_T_ICH8:
   2485 	case WM_T_ICH9:
   2486 	case WM_T_ICH10:
   2487 	case WM_T_PCH:
   2488 	case WM_T_PCH2:
   2489 	case WM_T_PCH_LPT:
   2490 		sc->nvm.read = wm_nvm_read_ich8;
   2491 		/* FLASH */
   2492 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2493 		sc->sc_nvm_wordsize = 2048;
   2494 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2495 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2496 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2497 			aprint_error_dev(sc->sc_dev,
   2498 			    "can't map FLASH registers\n");
   2499 			goto out;
   2500 		}
   2501 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
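         		/*
         		 * GFPREG's low field (ICH_GFPREG_BASE_MASK) holds the
         		 * first sector of the flash region and the same field at
         		 * bit 16 holds the last, both in ICH_FLASH_SECTOR_SIZE
         		 * units; the region holds two NVM banks, so the result
         		 * below is the per-bank size in 16-bit words.
         		 */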
   2502 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2503 		    ICH_FLASH_SECTOR_SIZE;
   2504 		sc->sc_ich8_flash_bank_size =
   2505 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2506 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2507 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2508 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2509 		sc->sc_flashreg_offset = 0;
   2510 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2511 		sc->phy.release = wm_put_swflag_ich8lan;
   2512 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2513 		sc->nvm.release = wm_put_nvm_ich8lan;
   2514 		break;
   2515 	case WM_T_PCH_SPT:
   2516 	case WM_T_PCH_CNP:
   2517 		sc->nvm.read = wm_nvm_read_spt;
   2518 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2519 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2520 		sc->sc_flasht = sc->sc_st;
   2521 		sc->sc_flashh = sc->sc_sh;
   2522 		sc->sc_ich8_flash_base = 0;
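         		/*
         		 * STRAP bits 5:1 encode the NVM size in units of
         		 * NVM_SIZE_MULTIPLIER bytes; e.g. a raw field value of
         		 * 0x1f yields 32 * NVM_SIZE_MULTIPLIER bytes.
         		 */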
   2523 		sc->sc_nvm_wordsize =
   2524 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2525 		    * NVM_SIZE_MULTIPLIER;
    2526 		/* That is the size in bytes; we want words */
   2527 		sc->sc_nvm_wordsize /= 2;
   2528 		/* Assume 2 banks */
   2529 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2530 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2531 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2532 		sc->phy.release = wm_put_swflag_ich8lan;
   2533 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2534 		sc->nvm.release = wm_put_nvm_ich8lan;
   2535 		break;
   2536 	case WM_T_I210:
   2537 	case WM_T_I211:
    2538 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2539 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2540 		if (wm_nvm_flash_presence_i210(sc)) {
   2541 			sc->nvm.read = wm_nvm_read_eerd;
   2542 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2543 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2544 			wm_nvm_set_addrbits_size_eecd(sc);
   2545 		} else {
   2546 			sc->nvm.read = wm_nvm_read_invm;
   2547 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2548 			sc->sc_nvm_wordsize = INVM_SIZE;
   2549 		}
   2550 		sc->phy.acquire = wm_get_phy_82575;
   2551 		sc->phy.release = wm_put_phy_82575;
   2552 		sc->nvm.acquire = wm_get_nvm_80003;
   2553 		sc->nvm.release = wm_put_nvm_80003;
   2554 		break;
   2555 	default:
   2556 		break;
   2557 	}
   2558 
   2559 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2560 	switch (sc->sc_type) {
   2561 	case WM_T_82571:
   2562 	case WM_T_82572:
   2563 		reg = CSR_READ(sc, WMREG_SWSM2);
   2564 		if ((reg & SWSM2_LOCK) == 0) {
   2565 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2566 			force_clear_smbi = true;
   2567 		} else
   2568 			force_clear_smbi = false;
   2569 		break;
   2570 	case WM_T_82573:
   2571 	case WM_T_82574:
   2572 	case WM_T_82583:
   2573 		force_clear_smbi = true;
   2574 		break;
   2575 	default:
   2576 		force_clear_smbi = false;
   2577 		break;
   2578 	}
   2579 	if (force_clear_smbi) {
   2580 		reg = CSR_READ(sc, WMREG_SWSM);
   2581 		if ((reg & SWSM_SMBI) != 0)
   2582 			aprint_error_dev(sc->sc_dev,
   2583 			    "Please update the Bootagent\n");
   2584 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2585 	}
   2586 
   2587 	/*
    2588 	 * Defer printing the EEPROM type until after verifying the checksum.
   2589 	 * This allows the EEPROM type to be printed correctly in the case
   2590 	 * that no EEPROM is attached.
   2591 	 */
   2592 	/*
   2593 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2594 	 * this for later, so we can fail future reads from the EEPROM.
   2595 	 */
   2596 	if (wm_nvm_validate_checksum(sc)) {
   2597 		/*
    2598 		 * Check again, because some PCI-e parts fail the first
    2599 		 * check due to the link being in its sleep state.
   2600 		 */
   2601 		if (wm_nvm_validate_checksum(sc))
   2602 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2603 	}
   2604 
   2605 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2606 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2607 	else {
   2608 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2609 		    sc->sc_nvm_wordsize);
   2610 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2611 			aprint_verbose("iNVM");
   2612 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2613 			aprint_verbose("FLASH(HW)");
   2614 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2615 			aprint_verbose("FLASH");
   2616 		else {
   2617 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2618 				eetype = "SPI";
   2619 			else
   2620 				eetype = "MicroWire";
   2621 			aprint_verbose("(%d address bits) %s EEPROM",
   2622 			    sc->sc_nvm_addrbits, eetype);
   2623 		}
   2624 	}
   2625 	wm_nvm_version(sc);
   2626 	aprint_verbose("\n");
   2627 
   2628 	/*
    2629 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
    2630 	 * might be incorrect.
   2631 	 */
   2632 	wm_gmii_setup_phytype(sc, 0, 0);
   2633 
   2634 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2635 	switch (sc->sc_type) {
   2636 	case WM_T_ICH8:
   2637 	case WM_T_ICH9:
   2638 	case WM_T_ICH10:
   2639 	case WM_T_PCH:
   2640 	case WM_T_PCH2:
   2641 	case WM_T_PCH_LPT:
   2642 	case WM_T_PCH_SPT:
   2643 	case WM_T_PCH_CNP:
   2644 		apme_mask = WUC_APME;
   2645 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2646 		if ((eeprom_data & apme_mask) != 0)
   2647 			sc->sc_flags |= WM_F_WOL;
   2648 		break;
   2649 	default:
   2650 		break;
   2651 	}
   2652 
   2653 	/* Reset the chip to a known state. */
   2654 	wm_reset(sc);
   2655 
   2656 	/*
   2657 	 * Check for I21[01] PLL workaround.
   2658 	 *
   2659 	 * Three cases:
   2660 	 * a) Chip is I211.
   2661 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2662 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2663 	 */
   2664 	if (sc->sc_type == WM_T_I211)
   2665 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2666 	if (sc->sc_type == WM_T_I210) {
   2667 		if (!wm_nvm_flash_presence_i210(sc))
   2668 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2669 		else if ((sc->sc_nvm_ver_major < 3)
   2670 		    || ((sc->sc_nvm_ver_major == 3)
   2671 			&& (sc->sc_nvm_ver_minor < 25))) {
   2672 			aprint_verbose_dev(sc->sc_dev,
   2673 			    "ROM image version %d.%d is older than 3.25\n",
   2674 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2675 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2676 		}
   2677 	}
   2678 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2679 		wm_pll_workaround_i210(sc);
   2680 
   2681 	wm_get_wakeup(sc);
   2682 
   2683 	/* Non-AMT based hardware can now take control from firmware */
   2684 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2685 		wm_get_hw_control(sc);
   2686 
   2687 	/*
   2688 	 * Read the Ethernet address from the EEPROM, if not first found
   2689 	 * in device properties.
   2690 	 */
   2691 	ea = prop_dictionary_get(dict, "mac-address");
   2692 	if (ea != NULL) {
   2693 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2694 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2695 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2696 	} else {
   2697 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2698 			aprint_error_dev(sc->sc_dev,
   2699 			    "unable to read Ethernet address\n");
   2700 			goto out;
   2701 		}
   2702 	}
   2703 
   2704 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2705 	    ether_sprintf(enaddr));
   2706 
   2707 	/*
   2708 	 * Read the config info from the EEPROM, and set up various
   2709 	 * bits in the control registers based on their contents.
   2710 	 */
   2711 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2712 	if (pn != NULL) {
   2713 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2714 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2715 	} else {
   2716 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2717 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2718 			goto out;
   2719 		}
   2720 	}
   2721 
   2722 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2723 	if (pn != NULL) {
   2724 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2725 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2726 	} else {
   2727 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2728 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2729 			goto out;
   2730 		}
   2731 	}
   2732 
    2733 	/* Check for WM_F_WOL */
   2734 	switch (sc->sc_type) {
   2735 	case WM_T_82542_2_0:
   2736 	case WM_T_82542_2_1:
   2737 	case WM_T_82543:
   2738 		/* dummy? */
   2739 		eeprom_data = 0;
   2740 		apme_mask = NVM_CFG3_APME;
   2741 		break;
   2742 	case WM_T_82544:
   2743 		apme_mask = NVM_CFG2_82544_APM_EN;
   2744 		eeprom_data = cfg2;
   2745 		break;
   2746 	case WM_T_82546:
   2747 	case WM_T_82546_3:
   2748 	case WM_T_82571:
   2749 	case WM_T_82572:
   2750 	case WM_T_82573:
   2751 	case WM_T_82574:
   2752 	case WM_T_82583:
   2753 	case WM_T_80003:
   2754 	case WM_T_82575:
   2755 	case WM_T_82576:
   2756 		apme_mask = NVM_CFG3_APME;
   2757 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2758 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2759 		break;
   2760 	case WM_T_82580:
   2761 	case WM_T_I350:
   2762 	case WM_T_I354:
   2763 	case WM_T_I210:
   2764 	case WM_T_I211:
   2765 		apme_mask = NVM_CFG3_APME;
   2766 		wm_nvm_read(sc,
   2767 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2768 		    1, &eeprom_data);
   2769 		break;
   2770 	case WM_T_ICH8:
   2771 	case WM_T_ICH9:
   2772 	case WM_T_ICH10:
   2773 	case WM_T_PCH:
   2774 	case WM_T_PCH2:
   2775 	case WM_T_PCH_LPT:
   2776 	case WM_T_PCH_SPT:
   2777 	case WM_T_PCH_CNP:
    2778 		/* Already checked before wm_reset() */
   2779 		apme_mask = eeprom_data = 0;
   2780 		break;
   2781 	default: /* XXX 82540 */
   2782 		apme_mask = NVM_CFG3_APME;
   2783 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2784 		break;
   2785 	}
    2786 	/* Set the WM_F_WOL flag if the APME bit read above is set */
   2787 	if ((eeprom_data & apme_mask) != 0)
   2788 		sc->sc_flags |= WM_F_WOL;
   2789 
   2790 	/*
    2791 	 * We have the EEPROM settings; now apply the special cases
    2792 	 * where the EEPROM may be wrong or the board doesn't support
    2793 	 * wake-on-LAN on a particular port.
   2794 	 */
   2795 	switch (sc->sc_pcidevid) {
   2796 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2797 		sc->sc_flags &= ~WM_F_WOL;
   2798 		break;
   2799 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2800 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2801 		/* Wake events only supported on port A for dual fiber
   2802 		 * regardless of eeprom setting */
   2803 		if (sc->sc_funcid == 1)
   2804 			sc->sc_flags &= ~WM_F_WOL;
   2805 		break;
   2806 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2807 		/* If quad port adapter, disable WoL on all but port A */
   2808 		if (sc->sc_funcid != 0)
   2809 			sc->sc_flags &= ~WM_F_WOL;
   2810 		break;
   2811 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2812 		/* Wake events only supported on port A for dual fiber
   2813 		 * regardless of eeprom setting */
   2814 		if (sc->sc_funcid == 1)
   2815 			sc->sc_flags &= ~WM_F_WOL;
   2816 		break;
   2817 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2818 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2819 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2820 		/* If quad port adapter, disable WoL on all but port A */
   2821 		if (sc->sc_funcid != 0)
   2822 			sc->sc_flags &= ~WM_F_WOL;
   2823 		break;
   2824 	}
   2825 
   2826 	if (sc->sc_type >= WM_T_82575) {
   2827 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2828 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2829 			    nvmword);
   2830 			if ((sc->sc_type == WM_T_82575) ||
   2831 			    (sc->sc_type == WM_T_82576)) {
   2832 				/* Check NVM for autonegotiation */
   2833 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2834 				    != 0)
   2835 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2836 			}
   2837 			if ((sc->sc_type == WM_T_82575) ||
   2838 			    (sc->sc_type == WM_T_I350)) {
   2839 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2840 					sc->sc_flags |= WM_F_MAS;
   2841 			}
   2842 		}
   2843 	}
   2844 
   2845 	/*
    2846 	 * XXX Some multi-port cards need special handling to disable
    2847 	 * a particular port.
   2848 	 */
   2849 
   2850 	if (sc->sc_type >= WM_T_82544) {
   2851 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2852 		if (pn != NULL) {
   2853 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2854 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2855 		} else {
   2856 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2857 				aprint_error_dev(sc->sc_dev,
   2858 				    "unable to read SWDPIN\n");
   2859 				goto out;
   2860 			}
   2861 		}
   2862 	}
   2863 
   2864 	if (cfg1 & NVM_CFG1_ILOS)
   2865 		sc->sc_ctrl |= CTRL_ILOS;
   2866 
   2867 	/*
   2868 	 * XXX
    2869 	 * This code isn't correct because pins 2 and 3 are located
    2870 	 * at different positions on newer chips. Check all the datasheets.
    2871 	 *
    2872 	 * Until this is resolved, apply it only to chips up to the 82580.
   2873 	 */
   2874 	if (sc->sc_type <= WM_T_82580) {
   2875 		if (sc->sc_type >= WM_T_82544) {
   2876 			sc->sc_ctrl |=
   2877 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2878 			    CTRL_SWDPIO_SHIFT;
   2879 			sc->sc_ctrl |=
   2880 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2881 			    CTRL_SWDPINS_SHIFT;
   2882 		} else {
   2883 			sc->sc_ctrl |=
   2884 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2885 			    CTRL_SWDPIO_SHIFT;
   2886 		}
   2887 	}
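         	/*
         	 * In other words, one nibble of the NVM word gives the
         	 * software-definable pin directions (SWDPIO) and the other
         	 * nibble gives their initial values (SWDPINS); each nibble is
         	 * shifted into the corresponding field of sc_ctrl, which is
         	 * written to WMREG_CTRL below.
         	 */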
   2888 
   2889 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2890 		wm_nvm_read(sc,
   2891 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2892 		    1, &nvmword);
   2893 		if (nvmword & NVM_CFG3_ILOS)
   2894 			sc->sc_ctrl |= CTRL_ILOS;
   2895 	}
   2896 
   2897 #if 0
   2898 	if (sc->sc_type >= WM_T_82544) {
   2899 		if (cfg1 & NVM_CFG1_IPS0)
   2900 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2901 		if (cfg1 & NVM_CFG1_IPS1)
   2902 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2903 		sc->sc_ctrl_ext |=
   2904 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2905 		    CTRL_EXT_SWDPIO_SHIFT;
   2906 		sc->sc_ctrl_ext |=
   2907 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2908 		    CTRL_EXT_SWDPINS_SHIFT;
   2909 	} else {
   2910 		sc->sc_ctrl_ext |=
   2911 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2912 		    CTRL_EXT_SWDPIO_SHIFT;
   2913 	}
   2914 #endif
   2915 
   2916 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2917 #if 0
   2918 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2919 #endif
   2920 
   2921 	if (sc->sc_type == WM_T_PCH) {
   2922 		uint16_t val;
   2923 
   2924 		/* Save the NVM K1 bit setting */
   2925 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2926 
   2927 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2928 			sc->sc_nvm_k1_enabled = 1;
   2929 		else
   2930 			sc->sc_nvm_k1_enabled = 0;
   2931 	}
   2932 
   2933 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2934 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2935 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2936 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2937 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2938 	    || sc->sc_type == WM_T_82573
   2939 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2940 		/* Copper only */
   2941 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2942 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2943 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2944 	    || (sc->sc_type == WM_T_I211)) {
   2945 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2946 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2947 		switch (link_mode) {
   2948 		case CTRL_EXT_LINK_MODE_1000KX:
   2949 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2950 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2951 			break;
   2952 		case CTRL_EXT_LINK_MODE_SGMII:
   2953 			if (wm_sgmii_uses_mdio(sc)) {
   2954 				aprint_normal_dev(sc->sc_dev,
   2955 				    "SGMII(MDIO)\n");
   2956 				sc->sc_flags |= WM_F_SGMII;
   2957 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2958 				break;
   2959 			}
   2960 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2961 			/*FALLTHROUGH*/
   2962 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2963 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2964 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2965 				if (link_mode
   2966 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2967 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2968 					sc->sc_flags |= WM_F_SGMII;
   2969 					aprint_verbose_dev(sc->sc_dev,
   2970 					    "SGMII\n");
   2971 				} else {
   2972 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2973 					aprint_verbose_dev(sc->sc_dev,
   2974 					    "SERDES\n");
   2975 				}
   2976 				break;
   2977 			}
   2978 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2979 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2980 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2981 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2982 				sc->sc_flags |= WM_F_SGMII;
   2983 			}
   2984 			/* Do not change link mode for 100BaseFX */
   2985 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2986 				break;
   2987 
   2988 			/* Change current link mode setting */
   2989 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2990 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2991 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2992 			else
   2993 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2994 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2995 			break;
   2996 		case CTRL_EXT_LINK_MODE_GMII:
   2997 		default:
   2998 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2999 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3000 			break;
   3001 		}
   3002 
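         		/*
         		 * To summarize the link mode decision above:
         		 *
         		 *	1000KX			-> SERDES
         		 *	SGMII via MDIO		-> COPPER (SGMII)
         		 *	SGMII/SERDES with SFP	-> taken from the SFP
         		 *	GMII or anything else	-> COPPER
         		 */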
    3004 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    3005 			reg |= CTRL_EXT_I2C_ENA;
    3006 		else
    3007 			reg &= ~CTRL_EXT_I2C_ENA;
   3008 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3009 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   3010 			if (!wm_sgmii_uses_mdio(sc))
   3011 				wm_gmii_setup_phytype(sc, 0, 0);
   3012 			wm_reset_mdicnfg_82580(sc);
   3013 		}
   3014 	} else if (sc->sc_type < WM_T_82543 ||
   3015 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   3016 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   3017 			aprint_error_dev(sc->sc_dev,
   3018 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   3019 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   3020 		}
   3021 	} else {
   3022 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   3023 			aprint_error_dev(sc->sc_dev,
   3024 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   3025 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   3026 		}
   3027 	}
   3028 
   3029 	if (sc->sc_type >= WM_T_PCH2)
   3030 		sc->sc_flags |= WM_F_EEE;
   3031 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   3032 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   3033 		/* XXX: Need special handling for I354. (not yet) */
   3034 		if (sc->sc_type != WM_T_I354)
   3035 			sc->sc_flags |= WM_F_EEE;
   3036 	}
   3037 
   3038 	/*
   3039 	 * The I350 has a bug where it always strips the CRC whether
   3040 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   3041 	 */
   3042 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3043 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3044 		sc->sc_flags |= WM_F_CRC_STRIP;
   3045 
   3046 	/* Set device properties (macflags) */
   3047 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   3048 
   3049 	if (sc->sc_flags != 0) {
   3050 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   3051 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   3052 	}
   3053 
   3054 #ifdef WM_MPSAFE
   3055 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3056 #else
   3057 	sc->sc_core_lock = NULL;
   3058 #endif
   3059 
   3060 	/* Initialize the media structures accordingly. */
   3061 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   3062 		wm_gmii_mediainit(sc, wmp->wmp_product);
   3063 	else
   3064 		wm_tbi_mediainit(sc); /* All others */
   3065 
   3066 	ifp = &sc->sc_ethercom.ec_if;
   3067 	xname = device_xname(sc->sc_dev);
   3068 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3069 	ifp->if_softc = sc;
   3070 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3071 #ifdef WM_MPSAFE
   3072 	ifp->if_extflags = IFEF_MPSAFE;
   3073 #endif
   3074 	ifp->if_ioctl = wm_ioctl;
   3075 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3076 		ifp->if_start = wm_nq_start;
   3077 		/*
    3078 		 * When there is only one CPU and the controller can use
    3079 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3080 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3081 		 * the other for link status changes.
   3082 		 * In this situation, wm_nq_transmit() is disadvantageous
   3083 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3084 		 */
   3085 		if (wm_is_using_multiqueue(sc))
   3086 			ifp->if_transmit = wm_nq_transmit;
   3087 	} else {
   3088 		ifp->if_start = wm_start;
   3089 		/*
   3090 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3091 		 * described above.
   3092 		 */
   3093 		if (wm_is_using_multiqueue(sc))
   3094 			ifp->if_transmit = wm_transmit;
   3095 	}
    3096 	/* wm(4) does not use ifp->if_watchdog; wm_tick is the watchdog. */
   3097 	ifp->if_init = wm_init;
   3098 	ifp->if_stop = wm_stop;
   3099 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3100 	IFQ_SET_READY(&ifp->if_snd);
   3101 
   3102 	/* Check for jumbo frame */
   3103 	switch (sc->sc_type) {
   3104 	case WM_T_82573:
   3105 		/* XXX limited to 9234 if ASPM is disabled */
   3106 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3107 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3108 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3109 		break;
   3110 	case WM_T_82571:
   3111 	case WM_T_82572:
   3112 	case WM_T_82574:
   3113 	case WM_T_82583:
   3114 	case WM_T_82575:
   3115 	case WM_T_82576:
   3116 	case WM_T_82580:
   3117 	case WM_T_I350:
   3118 	case WM_T_I354:
   3119 	case WM_T_I210:
   3120 	case WM_T_I211:
   3121 	case WM_T_80003:
   3122 	case WM_T_ICH9:
   3123 	case WM_T_ICH10:
   3124 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3125 	case WM_T_PCH_LPT:
   3126 	case WM_T_PCH_SPT:
   3127 	case WM_T_PCH_CNP:
   3128 		/* XXX limited to 9234 */
   3129 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3130 		break;
   3131 	case WM_T_PCH:
   3132 		/* XXX limited to 4096 */
   3133 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3134 		break;
   3135 	case WM_T_82542_2_0:
   3136 	case WM_T_82542_2_1:
   3137 	case WM_T_ICH8:
   3138 		/* No support for jumbo frame */
   3139 		break;
   3140 	default:
   3141 		/* ETHER_MAX_LEN_JUMBO */
   3142 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3143 		break;
   3144 	}
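         	/*
         	 * With ETHERCAP_JUMBO_MTU set, the MTU can be raised at run
         	 * time, e.g. "ifconfig wm0 mtu 9000", subject to the per-chip
         	 * limits noted above.
         	 */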
   3145 
    3146 	/* If we're an i82543 or greater, we can support VLANs. */
   3147 	if (sc->sc_type >= WM_T_82543) {
   3148 		sc->sc_ethercom.ec_capabilities |=
   3149 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3150 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3151 	}
   3152 
   3153 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3154 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3155 
   3156 	/*
    3157 	 * We can perform IPv4, TCPv4 and UDPv4 checksum offload (both
    3158 	 * Tx and Rx), but only on the i82543 and later.
   3159 	 */
   3160 	if (sc->sc_type >= WM_T_82543) {
   3161 		ifp->if_capabilities |=
   3162 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3163 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3164 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3165 		    IFCAP_CSUM_TCPv6_Tx |
   3166 		    IFCAP_CSUM_UDPv6_Tx;
   3167 	}
   3168 
   3169 	/*
    3170 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3171 	 *
   3172 	 *	82541GI (8086:1076) ... no
   3173 	 *	82572EI (8086:10b9) ... yes
   3174 	 */
   3175 	if (sc->sc_type >= WM_T_82571) {
   3176 		ifp->if_capabilities |=
   3177 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3178 	}
   3179 
   3180 	/*
    3181 	 * If we're an i82544 or greater (except i82547), we can do
   3182 	 * TCP segmentation offload.
   3183 	 */
   3184 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
   3185 		ifp->if_capabilities |= IFCAP_TSOv4;
   3186 
   3187 	if (sc->sc_type >= WM_T_82571)
   3188 		ifp->if_capabilities |= IFCAP_TSOv6;
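         	/*
         	 * The offload capabilities above can be toggled at run time
         	 * with ifconfig(8), e.g. "ifconfig wm0 tso4" or
         	 * "ifconfig wm0 -tcp4csum"; the resulting SIOCSIFCAP is
         	 * handled in wm_ioctl() and reinitializes the interface.
         	 */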
   3189 
   3190 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3191 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3192 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3193 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3194 
   3195 	/* Attach the interface. */
   3196 	if_initialize(ifp);
   3197 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3198 	ether_ifattach(ifp, enaddr);
   3199 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3200 	if_register(ifp);
   3201 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3202 	    RND_FLAG_DEFAULT);
   3203 
   3204 #ifdef WM_EVENT_COUNTERS
   3205 	/* Attach event counters. */
   3206 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3207 	    NULL, xname, "linkintr");
   3208 
   3209 	if (sc->sc_type >= WM_T_82542_2_1) {
   3210 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3211 		    NULL, xname, "tx_xoff");
   3212 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3213 		    NULL, xname, "tx_xon");
   3214 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3215 		    NULL, xname, "rx_xoff");
   3216 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3217 		    NULL, xname, "rx_xon");
   3218 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3219 		    NULL, xname, "rx_macctl");
   3220 	}
   3221 
   3222 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
   3223 	    NULL, xname, "CRC Error");
   3224 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
   3225 	    NULL, xname, "Symbol Error");
   3226 
   3227 	if (sc->sc_type >= WM_T_82543) {
   3228 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
   3229 		    NULL, xname, "Alignment Error");
   3230 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
   3231 		    NULL, xname, "Receive Error");
   3232 		evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
   3233 		    NULL, xname, "Carrier Extension Error");
   3234 	}
   3235 
   3236 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
   3237 	    NULL, xname, "Missed Packets");
   3238 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
   3239 	    NULL, xname, "Collision");
   3240 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
   3241 	    NULL, xname, "Sequence Error");
   3242 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
   3243 	    NULL, xname, "Receive Length Error");
   3244 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
   3245 	    NULL, xname, "Single Collision");
   3246 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
   3247 	    NULL, xname, "Excessive Collisions");
   3248 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
   3249 	    NULL, xname, "Multiple Collision");
   3250 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
   3251 	    NULL, xname, "Late Collisions");
   3252 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
   3253 	    NULL, xname, "Defer");
   3254 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
   3255 	    NULL, xname, "Good Packets Rx");
   3256 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
   3257 	    NULL, xname, "Broadcast Packets Rx");
   3258 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
   3259 	    NULL, xname, "Multicast Packets Rx");
   3260 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
   3261 	    NULL, xname, "Good Packets Tx");
   3262 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
   3263 	    NULL, xname, "Good Octets Rx");
   3264 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
   3265 	    NULL, xname, "Good Octets Tx");
   3266 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
   3267 	    NULL, xname, "Rx No Buffers");
   3268 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
   3269 	    NULL, xname, "Rx Undersize");
   3270 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
   3271 	    NULL, xname, "Rx Fragment");
   3272 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
   3273 	    NULL, xname, "Rx Oversize");
   3274 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
   3275 	    NULL, xname, "Rx Jabber");
   3276 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
   3277 	    NULL, xname, "Total Octets Rx");
   3278 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
   3279 	    NULL, xname, "Total Octets Tx");
   3280 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
   3281 	    NULL, xname, "Total Packets Rx");
   3282 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
   3283 	    NULL, xname, "Total Packets Tx");
   3284 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
   3285 	    NULL, xname, "Multicast Packets Tx");
   3286 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
   3287 	    NULL, xname, "Broadcast Packets Tx Count");
   3288 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
   3289 	    NULL, xname, "Packets Rx (64 bytes)");
   3290 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
   3291 	    NULL, xname, "Packets Rx (65-127 bytes)");
   3292 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
   3293 	    NULL, xname, "Packets Rx (128-255 bytes)");
   3294 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
    3295 	    NULL, xname, "Packets Rx (256-511 bytes)");
   3296 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
   3297 	    NULL, xname, "Packets Rx (512-1023 bytes)");
   3298 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
   3299 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
   3300 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
   3301 	    NULL, xname, "Packets Tx (64 bytes)");
   3302 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
   3303 	    NULL, xname, "Packets Tx (65-127 bytes)");
   3304 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
   3305 	    NULL, xname, "Packets Tx (128-255 bytes)");
   3306 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
   3307 	    NULL, xname, "Packets Tx (256-511 bytes)");
   3308 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
   3309 	    NULL, xname, "Packets Tx (512-1023 bytes)");
   3310 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
    3311 	    NULL, xname, "Packets Tx (1024-1522 bytes)");
   3312 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
   3313 	    NULL, xname, "Interrupt Assertion");
   3314 	evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
   3315 	    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
   3316 	evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
   3317 	    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
   3318 	evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
   3319 	    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
   3320 	evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
   3321 	    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
   3322 	evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
   3323 	    NULL, xname, "Intr. Cause Tx Queue Empty");
   3324 	evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
   3325 	    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
   3326 	evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
   3327 	    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
   3328 	evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
   3329 	    NULL, xname, "Interrupt Cause Receiver Overrun");
   3330 	if (sc->sc_type >= WM_T_82543) {
   3331 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
   3332 		    NULL, xname, "Tx with No CRS");
   3333 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
   3334 		    NULL, xname, "TCP Segmentation Context Tx");
   3335 		evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
   3336 		    NULL, xname, "TCP Segmentation Context Tx Fail");
   3337 	}
   3338 	if (sc->sc_type >= WM_T_82540) {
   3339 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
   3340 		    NULL, xname, "Management Packets RX");
   3341 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
   3342 		    NULL, xname, "Management Packets Dropped");
   3343 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
   3344 		    NULL, xname, "Management Packets TX");
   3345 	}
   3346 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3347 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
   3348 		    NULL, xname, "BMC2OS Packets received by host");
   3349 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
   3350 		    NULL, xname, "OS2BMC Packets transmitted by host");
   3351 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
   3352 		    NULL, xname, "BMC2OS Packets sent by BMC");
   3353 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
   3354 		    NULL, xname, "OS2BMC Packets received by BMC");
   3355 	}
   3356 #endif /* WM_EVENT_COUNTERS */
   3357 
   3358 	sc->sc_txrx_use_workqueue = false;
   3359 
   3360 	if (wm_phy_need_linkdown_discard(sc)) {
   3361 		DPRINTF(sc, WM_DEBUG_LINK,
   3362 		    ("%s: %s: Set linkdown discard flag\n",
   3363 			device_xname(sc->sc_dev), __func__));
   3364 		wm_set_linkdown_discard(sc);
   3365 	}
   3366 
   3367 	wm_init_sysctls(sc);
   3368 
   3369 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3370 		pmf_class_network_register(self, ifp);
   3371 	else
   3372 		aprint_error_dev(self, "couldn't establish power handler\n");
   3373 
   3374 	sc->sc_flags |= WM_F_ATTACHED;
   3375 out:
   3376 	return;
   3377 }
   3378 
   3379 /* The detach function (ca_detach) */
   3380 static int
   3381 wm_detach(device_t self, int flags __unused)
   3382 {
   3383 	struct wm_softc *sc = device_private(self);
   3384 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3385 	int i;
   3386 
   3387 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3388 		return 0;
   3389 
    3390 	/* Stop the interface. Callouts are halted inside wm_stop(). */
   3391 	IFNET_LOCK(ifp);
   3392 	sc->sc_dying = true;
   3393 	wm_stop(ifp, 1);
   3394 	IFNET_UNLOCK(ifp);
   3395 
   3396 	pmf_device_deregister(self);
   3397 
   3398 	sysctl_teardown(&sc->sc_sysctllog);
   3399 
   3400 #ifdef WM_EVENT_COUNTERS
   3401 	evcnt_detach(&sc->sc_ev_linkintr);
   3402 
   3403 	if (sc->sc_type >= WM_T_82542_2_1) {
   3404 		evcnt_detach(&sc->sc_ev_tx_xoff);
   3405 		evcnt_detach(&sc->sc_ev_tx_xon);
   3406 		evcnt_detach(&sc->sc_ev_rx_xoff);
   3407 		evcnt_detach(&sc->sc_ev_rx_xon);
   3408 		evcnt_detach(&sc->sc_ev_rx_macctl);
   3409 	}
   3410 
   3411 	evcnt_detach(&sc->sc_ev_crcerrs);
   3412 	evcnt_detach(&sc->sc_ev_symerrc);
   3413 
   3414 	if (sc->sc_type >= WM_T_82543) {
   3415 		evcnt_detach(&sc->sc_ev_algnerrc);
   3416 		evcnt_detach(&sc->sc_ev_rxerrc);
   3417 		evcnt_detach(&sc->sc_ev_cexterr);
   3418 	}
   3419 	evcnt_detach(&sc->sc_ev_mpc);
   3420 	evcnt_detach(&sc->sc_ev_colc);
   3421 	evcnt_detach(&sc->sc_ev_sec);
   3422 	evcnt_detach(&sc->sc_ev_rlec);
   3423 	evcnt_detach(&sc->sc_ev_scc);
   3424 	evcnt_detach(&sc->sc_ev_ecol);
   3425 	evcnt_detach(&sc->sc_ev_mcc);
   3426 	evcnt_detach(&sc->sc_ev_latecol);
   3427 	evcnt_detach(&sc->sc_ev_dc);
   3428 	evcnt_detach(&sc->sc_ev_gprc);
   3429 	evcnt_detach(&sc->sc_ev_bprc);
   3430 	evcnt_detach(&sc->sc_ev_mprc);
   3431 	evcnt_detach(&sc->sc_ev_gptc);
   3432 	evcnt_detach(&sc->sc_ev_gorc);
   3433 	evcnt_detach(&sc->sc_ev_gotc);
   3434 	evcnt_detach(&sc->sc_ev_rnbc);
   3435 	evcnt_detach(&sc->sc_ev_ruc);
   3436 	evcnt_detach(&sc->sc_ev_rfc);
   3437 	evcnt_detach(&sc->sc_ev_roc);
   3438 	evcnt_detach(&sc->sc_ev_rjc);
   3439 	evcnt_detach(&sc->sc_ev_tor);
   3440 	evcnt_detach(&sc->sc_ev_tot);
   3441 	evcnt_detach(&sc->sc_ev_tpr);
   3442 	evcnt_detach(&sc->sc_ev_tpt);
   3443 	evcnt_detach(&sc->sc_ev_mptc);
   3444 	evcnt_detach(&sc->sc_ev_bptc);
   3445 	evcnt_detach(&sc->sc_ev_prc64);
   3446 	evcnt_detach(&sc->sc_ev_prc127);
   3447 	evcnt_detach(&sc->sc_ev_prc255);
   3448 	evcnt_detach(&sc->sc_ev_prc511);
   3449 	evcnt_detach(&sc->sc_ev_prc1023);
   3450 	evcnt_detach(&sc->sc_ev_prc1522);
   3451 	evcnt_detach(&sc->sc_ev_ptc64);
   3452 	evcnt_detach(&sc->sc_ev_ptc127);
   3453 	evcnt_detach(&sc->sc_ev_ptc255);
   3454 	evcnt_detach(&sc->sc_ev_ptc511);
   3455 	evcnt_detach(&sc->sc_ev_ptc1023);
   3456 	evcnt_detach(&sc->sc_ev_ptc1522);
   3457 	evcnt_detach(&sc->sc_ev_iac);
   3458 	evcnt_detach(&sc->sc_ev_icrxptc);
   3459 	evcnt_detach(&sc->sc_ev_icrxatc);
   3460 	evcnt_detach(&sc->sc_ev_ictxptc);
   3461 	evcnt_detach(&sc->sc_ev_ictxact);
   3462 	evcnt_detach(&sc->sc_ev_ictxqec);
   3463 	evcnt_detach(&sc->sc_ev_ictxqmtc);
   3464 	evcnt_detach(&sc->sc_ev_icrxdmtc);
   3465 	evcnt_detach(&sc->sc_ev_icrxoc);
   3466 	if (sc->sc_type >= WM_T_82543) {
   3467 		evcnt_detach(&sc->sc_ev_tncrs);
   3468 		evcnt_detach(&sc->sc_ev_tsctc);
   3469 		evcnt_detach(&sc->sc_ev_tsctfc);
   3470 	}
   3471 	if (sc->sc_type >= WM_T_82540) {
   3472 		evcnt_detach(&sc->sc_ev_mgtprc);
   3473 		evcnt_detach(&sc->sc_ev_mgtpdc);
   3474 		evcnt_detach(&sc->sc_ev_mgtptc);
   3475 	}
   3476 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
   3477 		evcnt_detach(&sc->sc_ev_b2ogprc);
   3478 		evcnt_detach(&sc->sc_ev_o2bspc);
   3479 		evcnt_detach(&sc->sc_ev_b2ospc);
   3480 		evcnt_detach(&sc->sc_ev_o2bgptc);
   3481 	}
   3482 #endif /* WM_EVENT_COUNTERS */
   3483 
   3484 	rnd_detach_source(&sc->rnd_source);
   3485 
   3486 	/* Tell the firmware about the release */
   3487 	WM_CORE_LOCK(sc);
   3488 	wm_release_manageability(sc);
   3489 	wm_release_hw_control(sc);
   3490 	wm_enable_wakeup(sc);
   3491 	WM_CORE_UNLOCK(sc);
   3492 
   3493 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3494 
   3495 	ether_ifdetach(ifp);
   3496 	if_detach(ifp);
   3497 	if_percpuq_destroy(sc->sc_ipq);
   3498 
   3499 	/* Delete all remaining media. */
   3500 	ifmedia_fini(&sc->sc_mii.mii_media);
   3501 
   3502 	/* Unload RX dmamaps and free mbufs */
   3503 	for (i = 0; i < sc->sc_nqueues; i++) {
   3504 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3505 		mutex_enter(rxq->rxq_lock);
   3506 		wm_rxdrain(rxq);
   3507 		mutex_exit(rxq->rxq_lock);
   3508 	}
   3509 	/* Must unlock here */
   3510 
   3511 	/* Disestablish the interrupt handler */
   3512 	for (i = 0; i < sc->sc_nintrs; i++) {
   3513 		if (sc->sc_ihs[i] != NULL) {
   3514 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3515 			sc->sc_ihs[i] = NULL;
   3516 		}
   3517 	}
   3518 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3519 
   3520 	/* wm_stop() ensured that the workqueue is stopped. */
   3521 	workqueue_destroy(sc->sc_queue_wq);
   3522 
   3523 	for (i = 0; i < sc->sc_nqueues; i++)
   3524 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3525 
   3526 	wm_free_txrx_queues(sc);
   3527 
   3528 	/* Unmap the registers */
   3529 	if (sc->sc_ss) {
   3530 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3531 		sc->sc_ss = 0;
   3532 	}
   3533 	if (sc->sc_ios) {
   3534 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3535 		sc->sc_ios = 0;
   3536 	}
   3537 	if (sc->sc_flashs) {
   3538 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3539 		sc->sc_flashs = 0;
   3540 	}
   3541 
   3542 	if (sc->sc_core_lock)
   3543 		mutex_obj_free(sc->sc_core_lock);
   3544 	if (sc->sc_ich_phymtx)
   3545 		mutex_obj_free(sc->sc_ich_phymtx);
   3546 	if (sc->sc_ich_nvmmtx)
   3547 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3548 
   3549 	return 0;
   3550 }
   3551 
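         /*
          * wm_suspend:
          *
          *	pmf suspend handler. Release manageability and hardware
          *	control and arm the wakeup logic before powering down.
          */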
   3552 static bool
   3553 wm_suspend(device_t self, const pmf_qual_t *qual)
   3554 {
   3555 	struct wm_softc *sc = device_private(self);
   3556 
   3557 	wm_release_manageability(sc);
   3558 	wm_release_hw_control(sc);
   3559 	wm_enable_wakeup(sc);
   3560 
   3561 	return true;
   3562 }
   3563 
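         /*
          * wm_resume:
          *
          *	pmf resume handler. Report and clear the wakeup status, apply
          *	resume workarounds, and reset or reinitialize the chip
          *	depending on whether the interface is up.
          */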
   3564 static bool
   3565 wm_resume(device_t self, const pmf_qual_t *qual)
   3566 {
   3567 	struct wm_softc *sc = device_private(self);
   3568 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3569 	pcireg_t reg;
   3570 	char buf[256];
   3571 
   3572 	reg = CSR_READ(sc, WMREG_WUS);
   3573 	if (reg != 0) {
   3574 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3575 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3576 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3577 	}
   3578 
   3579 	if (sc->sc_type >= WM_T_PCH2)
   3580 		wm_resume_workarounds_pchlan(sc);
   3581 	IFNET_LOCK(ifp);
   3582 	if ((ifp->if_flags & IFF_UP) == 0) {
   3583 		/* >= PCH_SPT hardware workaround before reset. */
   3584 		if (sc->sc_type >= WM_T_PCH_SPT)
   3585 			wm_flush_desc_rings(sc);
   3586 
   3587 		wm_reset(sc);
   3588 		/* Non-AMT based hardware can now take control from firmware */
   3589 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3590 			wm_get_hw_control(sc);
   3591 		wm_init_manageability(sc);
   3592 	} else {
   3593 		/*
   3594 		 * We called pmf_class_network_register(), so if_init() is
   3595 		 * automatically called when IFF_UP. wm_reset(),
   3596 		 * wm_get_hw_control() and wm_init_manageability() are called
   3597 		 * via wm_init().
   3598 		 */
   3599 	}
   3600 	IFNET_UNLOCK(ifp);
   3601 
   3602 	return true;
   3603 }
   3604 
   3605 /*
   3606  * wm_watchdog:		[ifnet interface function]
   3607  *
   3608  *	Watchdog timer handler.
   3609  */
   3610 static void
   3611 wm_watchdog(struct ifnet *ifp)
   3612 {
   3613 	int qid;
   3614 	struct wm_softc *sc = ifp->if_softc;
    3615 	uint16_t hang_queue = 0; /* At most 16 queues (82576); one bit each. */
   3616 
   3617 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3618 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3619 
   3620 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3621 	}
   3622 
    3623 	/* If any queue hung up, reset the interface. */
   3624 	if (hang_queue != 0) {
   3625 		(void)wm_init(ifp);
   3626 
   3627 		/*
    3628 		 * Some upper layer processing still calls ifp->if_start()
    3629 		 * directly, e.g. ALTQ or single-CPU systems.
   3630 		 */
   3631 		/* Try to get more packets going. */
   3632 		ifp->if_start(ifp);
   3633 	}
   3634 }
   3635 
   3636 
   3637 static void
   3638 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3639 {
   3640 
   3641 	mutex_enter(txq->txq_lock);
   3642 	if (txq->txq_sending &&
   3643 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3644 		wm_watchdog_txq_locked(ifp, txq, hang);
   3645 
   3646 	mutex_exit(txq->txq_lock);
   3647 }
   3648 
   3649 static void
   3650 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3651     uint16_t *hang)
   3652 {
   3653 	struct wm_softc *sc = ifp->if_softc;
   3654 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3655 
   3656 	KASSERT(mutex_owned(txq->txq_lock));
   3657 
   3658 	/*
   3659 	 * Since we're using delayed interrupts, sweep up
   3660 	 * before we report an error.
   3661 	 */
   3662 	wm_txeof(txq, UINT_MAX);
   3663 
   3664 	if (txq->txq_sending)
   3665 		*hang |= __BIT(wmq->wmq_id);
   3666 
   3667 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3668 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3669 		    device_xname(sc->sc_dev));
   3670 	} else {
   3671 #ifdef WM_DEBUG
   3672 		int i, j;
   3673 		struct wm_txsoft *txs;
   3674 #endif
   3675 		log(LOG_ERR,
   3676 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3677 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3678 		    txq->txq_next);
   3679 		if_statinc(ifp, if_oerrors);
   3680 #ifdef WM_DEBUG
   3681 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3682 		    i = WM_NEXTTXS(txq, i)) {
   3683 			txs = &txq->txq_soft[i];
   3684 			printf("txs %d tx %d -> %d\n",
   3685 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3686 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3687 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3688 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3689 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3690 					printf("\t %#08x%08x\n",
   3691 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3692 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3693 				} else {
   3694 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3695 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3696 					    txq->txq_descs[j].wtx_addr.wa_low);
   3697 					printf("\t %#04x%02x%02x%08x\n",
   3698 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3699 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3700 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3701 					    txq->txq_descs[j].wtx_cmdlen);
   3702 				}
   3703 				if (j == txs->txs_lastdesc)
   3704 					break;
   3705 			}
   3706 		}
   3707 #endif
   3708 	}
   3709 }
   3710 
   3711 /*
   3712  * wm_tick:
   3713  *
   3714  *	One second timer, used to check link status, sweep up
   3715  *	completed transmit jobs, etc.
   3716  */
   3717 static void
   3718 wm_tick(void *arg)
   3719 {
   3720 	struct wm_softc *sc = arg;
   3721 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    3722 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
   3723 	    cexterr;
   3724 #ifndef WM_MPSAFE
   3725 	int s = splnet();
   3726 #endif
   3727 
   3728 	WM_CORE_LOCK(sc);
   3729 
   3730 	if (sc->sc_core_stopping) {
   3731 		WM_CORE_UNLOCK(sc);
   3732 #ifndef WM_MPSAFE
   3733 		splx(s);
   3734 #endif
   3735 		return;
   3736 	}
   3737 
   3738 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
   3739 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
   3740 	mpc = CSR_READ(sc, WMREG_MPC);
   3741 	colc = CSR_READ(sc, WMREG_COLC);
   3742 	sec = CSR_READ(sc, WMREG_SEC);
   3743 	rlec = CSR_READ(sc, WMREG_RLEC);
   3744 
   3745 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
   3746 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
   3747 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
   3748 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
   3749 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
   3750 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
   3751 
   3752 	if (sc->sc_type >= WM_T_82542_2_1) {
   3753 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3754 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3755 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3756 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3757 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3758 	}
   3759 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
   3760 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
   3761 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
   3762 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
   3763 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
   3764 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
   3765 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
   3766 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
   3767 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
   3768 
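         	/*
         	 * GORC/GOTC (and TOR/TOT below) are 64-bit counters split
         	 * across an L/H pair of 32-bit registers; read both halves
         	 * and combine them into the 64-bit event counter.
         	 */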
    3769 	WM_EVCNT_ADD(&sc->sc_ev_gorc, CSR_READ(sc, WMREG_GORCL) +
    3770 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
    3771 	WM_EVCNT_ADD(&sc->sc_ev_gotc, CSR_READ(sc, WMREG_GOTCL) +
    3772 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
   3773 
   3774 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
   3775 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
   3776 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
   3777 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
   3778 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
   3779 
    3780 	WM_EVCNT_ADD(&sc->sc_ev_tor, CSR_READ(sc, WMREG_TORL) +
    3781 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
    3782 	WM_EVCNT_ADD(&sc->sc_ev_tot, CSR_READ(sc, WMREG_TOTL) +
    3783 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
   3784 
   3785 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
   3786 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
   3787 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
   3788 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
   3789 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
   3790 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
   3791 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
   3792 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
   3793 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
   3794 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
   3795 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
   3796 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
   3797 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
   3798 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
   3799 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
   3800 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
   3801 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
   3802 	WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
   3803 	WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
   3804 	WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
   3805 	WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
   3806 	WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
   3807 	WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
   3808 	WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
   3809 	WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
   3810 
   3811 	if (sc->sc_type >= WM_T_82543) {
   3812 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
   3813 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
   3814 		cexterr = CSR_READ(sc, WMREG_CEXTERR);
   3815 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
   3816 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
   3817 		WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
   3818 
   3819 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
   3820 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
   3821 		WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
   3822 	} else
   3823 		algnerrc = rxerrc = cexterr = 0;
   3824 
   3825 	if (sc->sc_type >= WM_T_82540) {
   3826 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
   3827 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
   3828 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
   3829 	}
   3830 	if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
   3831 	    && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
   3832 		WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
   3833 		WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
   3834 		WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
   3835 		WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
   3836 	}
   3837 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3838 	if_statadd_ref(nsr, if_collisions, colc);
   3839 	if_statadd_ref(nsr, if_ierrors,
   3840 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
   3841 	/*
    3842 	 * WMREG_RNBC is incremented when no receive buffer is available
    3843 	 * in host memory. It is not a count of dropped packets, because
    3844 	 * the controller can still receive packets in that case as long
    3845 	 * as there is space in the PHY's FIFO.
    3846 	 *
    3847 	 * To track WMREG_RNBC, use a dedicated EVCNT instead of
    3848 	 * if_iqdrops.
   3849 	 */
   3850 	if_statadd_ref(nsr, if_iqdrops, mpc);
   3851 	IF_STAT_PUTREF(ifp);
   3852 
   3853 	if (sc->sc_flags & WM_F_HAS_MII)
   3854 		mii_tick(&sc->sc_mii);
   3855 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3856 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3857 		wm_serdes_tick(sc);
   3858 	else
   3859 		wm_tbi_tick(sc);
   3860 
   3861 	WM_CORE_UNLOCK(sc);
   3862 #ifndef WM_MPSAFE
   3863 	splx(s);
   3864 #endif
   3865 
   3866 	wm_watchdog(ifp);
   3867 
   3868 	callout_schedule(&sc->sc_tick_ch, hz);
   3869 }
   3870 
   3871 static int
   3872 wm_ifflags_cb(struct ethercom *ec)
   3873 {
   3874 	struct ifnet *ifp = &ec->ec_if;
   3875 	struct wm_softc *sc = ifp->if_softc;
   3876 	u_short iffchange;
   3877 	int ecchange;
   3878 	bool needreset = false;
   3879 	int rc = 0;
   3880 
   3881 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3882 		device_xname(sc->sc_dev), __func__));
   3883 
   3884 	KASSERT(IFNET_LOCKED(ifp));
   3885 	WM_CORE_LOCK(sc);
   3886 
   3887 	/*
   3888 	 * Check for if_flags.
   3889 	 * Main usage is to prevent linkdown when opening bpf.
   3890 	 */
   3891 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3892 	sc->sc_if_flags = ifp->if_flags;
   3893 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3894 		needreset = true;
   3895 		goto ec;
   3896 	}
   3897 
   3898 	/* iff related updates */
   3899 	if ((iffchange & IFF_PROMISC) != 0)
   3900 		wm_set_filter(sc);
   3901 
   3902 	wm_set_vlan(sc);
   3903 
   3904 ec:
   3905 	/* Check for ec_capenable. */
   3906 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3907 	sc->sc_ec_capenable = ec->ec_capenable;
   3908 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3909 		needreset = true;
   3910 		goto out;
   3911 	}
   3912 
   3913 	/* ec related updates */
   3914 	wm_set_eee(sc);
   3915 
   3916 out:
   3917 	if (needreset)
   3918 		rc = ENETRESET;
   3919 	WM_CORE_UNLOCK(sc);
   3920 
   3921 	return rc;
   3922 }
   3923 
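         /*
          * Return true if the PHY needs packets queued while the link is down
          * to be discarded (see wm_set_linkdown_discard() below).
          */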
   3924 static bool
   3925 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3926 {
   3927 
   3928 	switch (sc->sc_phytype) {
   3929 	case WMPHY_82577: /* ihphy */
   3930 	case WMPHY_82578: /* atphy */
   3931 	case WMPHY_82579: /* ihphy */
   3932 	case WMPHY_I217: /* ihphy */
   3933 	case WMPHY_82580: /* ihphy */
   3934 	case WMPHY_I350: /* ihphy */
   3935 		return true;
   3936 	default:
   3937 		return false;
   3938 	}
   3939 }
   3940 
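         /*
          * Mark every Tx queue so that packets are discarded while the link
          * is down; wm_clear_linkdown_discard() below undoes this.
          */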
   3941 static void
   3942 wm_set_linkdown_discard(struct wm_softc *sc)
   3943 {
   3944 
   3945 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3946 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3947 
   3948 		mutex_enter(txq->txq_lock);
   3949 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3950 		mutex_exit(txq->txq_lock);
   3951 	}
   3952 }
   3953 
   3954 static void
   3955 wm_clear_linkdown_discard(struct wm_softc *sc)
   3956 {
   3957 
   3958 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3959 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3960 
   3961 		mutex_enter(txq->txq_lock);
   3962 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3963 		mutex_exit(txq->txq_lock);
   3964 	}
   3965 }
   3966 
   3967 /*
   3968  * wm_ioctl:		[ifnet interface function]
   3969  *
   3970  *	Handle control requests from the operator.
   3971  */
   3972 static int
   3973 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3974 {
   3975 	struct wm_softc *sc = ifp->if_softc;
   3976 	struct ifreq *ifr = (struct ifreq *)data;
   3977 	struct ifaddr *ifa = (struct ifaddr *)data;
   3978 	struct sockaddr_dl *sdl;
   3979 	int error;
   3980 
   3981 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3982 		device_xname(sc->sc_dev), __func__));
   3983 
   3984 	switch (cmd) {
   3985 	case SIOCADDMULTI:
   3986 	case SIOCDELMULTI:
   3987 		break;
   3988 	default:
   3989 		KASSERT(IFNET_LOCKED(ifp));
   3990 	}
   3991 
   3992 #ifndef WM_MPSAFE
   3993 	const int s = splnet();
   3994 #endif
   3995 	switch (cmd) {
   3996 	case SIOCSIFMEDIA:
   3997 		WM_CORE_LOCK(sc);
   3998 		/* Flow control requires full-duplex mode. */
   3999 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   4000 		    (ifr->ifr_media & IFM_FDX) == 0)
   4001 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   4002 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   4003 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   4004 				/* We can do both TXPAUSE and RXPAUSE. */
   4005 				ifr->ifr_media |=
   4006 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   4007 			}
   4008 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   4009 		}
   4010 		WM_CORE_UNLOCK(sc);
   4011 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   4012 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   4013 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   4014 				DPRINTF(sc, WM_DEBUG_LINK,
   4015 				    ("%s: %s: Set linkdown discard flag\n",
   4016 					device_xname(sc->sc_dev), __func__));
   4017 				wm_set_linkdown_discard(sc);
   4018 			}
   4019 		}
   4020 		break;
   4021 	case SIOCINITIFADDR:
   4022 		WM_CORE_LOCK(sc);
   4023 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   4024 			sdl = satosdl(ifp->if_dl->ifa_addr);
   4025 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   4026 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   4027 			/* Unicast address is the first multicast entry */
   4028 			wm_set_filter(sc);
   4029 			error = 0;
   4030 			WM_CORE_UNLOCK(sc);
   4031 			break;
   4032 		}
   4033 		WM_CORE_UNLOCK(sc);
   4034 		/*FALLTHROUGH*/
   4035 	default:
   4036 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   4037 			if (((ifp->if_flags & IFF_UP) != 0) &&
   4038 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   4039 				DPRINTF(sc, WM_DEBUG_LINK,
   4040 				    ("%s: %s: Set linkdown discard flag\n",
   4041 					device_xname(sc->sc_dev), __func__));
   4042 				wm_set_linkdown_discard(sc);
   4043 			}
   4044 		}
   4045 #ifdef WM_MPSAFE
   4046 		const int s = splnet();
   4047 #endif
   4048 		/* It may call wm_start, so unlock here */
   4049 		error = ether_ioctl(ifp, cmd, data);
   4050 #ifdef WM_MPSAFE
   4051 		splx(s);
   4052 #endif
   4053 		if (error != ENETRESET)
   4054 			break;
   4055 
   4056 		error = 0;
   4057 
   4058 		if (cmd == SIOCSIFCAP)
   4059 			error = if_init(ifp);
   4060 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
   4061 			WM_CORE_LOCK(sc);
   4062 			if (sc->sc_if_flags & IFF_RUNNING) {
   4063 				/*
   4064 				 * Multicast list has changed; set the hardware filter
   4065 				 * accordingly.
   4066 				 */
   4067 				wm_set_filter(sc);
   4068 			}
   4069 			WM_CORE_UNLOCK(sc);
   4070 		}
   4071 		break;
   4072 	}
   4073 
   4074 #ifndef WM_MPSAFE
   4075 	splx(s);
   4076 #endif
   4077 	return error;
   4078 }
   4079 
   4080 /* MAC address related */
   4081 
   4082 /*
    4083  * Get the offset of the MAC address and return it.
    4084  * If an error occurs, 0 is returned.
   4085  */
   4086 static uint16_t
   4087 wm_check_alt_mac_addr(struct wm_softc *sc)
   4088 {
   4089 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4090 	uint16_t offset = NVM_OFF_MACADDR;
   4091 
   4092 	/* Try to read alternative MAC address pointer */
   4093 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   4094 		return 0;
   4095 
   4096 	/* Check pointer if it's valid or not. */
   4097 	if ((offset == 0x0000) || (offset == 0xffff))
   4098 		return 0;
   4099 
   4100 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   4101 	/*
    4102 	 * Check whether the alternative MAC address is valid. Some cards
    4103 	 * have a non-0xffff pointer but don't actually use an alternative
    4104 	 * MAC address.
    4105 	 *
    4106 	 * A valid address must have the multicast (group) bit clear.
   4107 	 */
   4108 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   4109 		if (((myea[0] & 0xff) & 0x01) == 0)
   4110 			return offset; /* Found */
   4111 
   4112 	/* Not found */
   4113 	return 0;
   4114 }
   4115 
   4116 static int
   4117 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   4118 {
   4119 	uint16_t myea[ETHER_ADDR_LEN / 2];
   4120 	uint16_t offset = NVM_OFF_MACADDR;
   4121 	int do_invert = 0;
   4122 
   4123 	switch (sc->sc_type) {
   4124 	case WM_T_82580:
   4125 	case WM_T_I350:
   4126 	case WM_T_I354:
   4127 		/* EEPROM Top Level Partitioning */
   4128 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   4129 		break;
   4130 	case WM_T_82571:
   4131 	case WM_T_82575:
   4132 	case WM_T_82576:
   4133 	case WM_T_80003:
   4134 	case WM_T_I210:
   4135 	case WM_T_I211:
   4136 		offset = wm_check_alt_mac_addr(sc);
   4137 		if (offset == 0)
   4138 			if ((sc->sc_funcid & 0x01) == 1)
   4139 				do_invert = 1;
   4140 		break;
   4141 	default:
   4142 		if ((sc->sc_funcid & 0x01) == 1)
   4143 			do_invert = 1;
   4144 		break;
   4145 	}
   4146 
   4147 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   4148 		goto bad;
   4149 
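         	/*
         	 * NVM words are little-endian; e.g. the MAC address
         	 * 00:11:22:33:44:55 is stored as myea[] = { 0x1100, 0x3322,
         	 * 0x5544 } and unpacked low byte first below.
         	 */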
   4150 	enaddr[0] = myea[0] & 0xff;
   4151 	enaddr[1] = myea[0] >> 8;
   4152 	enaddr[2] = myea[1] & 0xff;
   4153 	enaddr[3] = myea[1] >> 8;
   4154 	enaddr[4] = myea[2] & 0xff;
   4155 	enaddr[5] = myea[2] >> 8;
   4156 
   4157 	/*
   4158 	 * Toggle the LSB of the MAC address on the second port
   4159 	 * of some dual port cards.
   4160 	 */
   4161 	if (do_invert != 0)
   4162 		enaddr[5] ^= 1;
   4163 
   4164 	return 0;
   4165 
   4166  bad:
   4167 	return -1;
   4168 }
   4169 
   4170 /*
   4171  * wm_set_ral:
   4172  *
    4173  *	Set an entry in the receive address list.
   4174  */
   4175 static void
   4176 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   4177 {
   4178 	uint32_t ral_lo, ral_hi, addrl, addrh;
   4179 	uint32_t wlock_mac;
   4180 	int rv;
   4181 
   4182 	if (enaddr != NULL) {
   4183 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   4184 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   4185 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   4186 		ral_hi |= RAL_AV;
   4187 	} else {
   4188 		ral_lo = 0;
   4189 		ral_hi = 0;
   4190 	}
   4191 
   4192 	switch (sc->sc_type) {
   4193 	case WM_T_82542_2_0:
   4194 	case WM_T_82542_2_1:
   4195 	case WM_T_82543:
   4196 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   4197 		CSR_WRITE_FLUSH(sc);
   4198 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   4199 		CSR_WRITE_FLUSH(sc);
   4200 		break;
   4201 	case WM_T_PCH2:
   4202 	case WM_T_PCH_LPT:
   4203 	case WM_T_PCH_SPT:
   4204 	case WM_T_PCH_CNP:
   4205 		if (idx == 0) {
   4206 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4207 			CSR_WRITE_FLUSH(sc);
   4208 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4209 			CSR_WRITE_FLUSH(sc);
   4210 			return;
   4211 		}
   4212 		if (sc->sc_type != WM_T_PCH2) {
   4213 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   4214 			    FWSM_WLOCK_MAC);
   4215 			addrl = WMREG_SHRAL(idx - 1);
   4216 			addrh = WMREG_SHRAH(idx - 1);
   4217 		} else {
   4218 			wlock_mac = 0;
   4219 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   4220 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   4221 		}
   4222 
   4223 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   4224 			rv = wm_get_swflag_ich8lan(sc);
   4225 			if (rv != 0)
   4226 				return;
   4227 			CSR_WRITE(sc, addrl, ral_lo);
   4228 			CSR_WRITE_FLUSH(sc);
   4229 			CSR_WRITE(sc, addrh, ral_hi);
   4230 			CSR_WRITE_FLUSH(sc);
   4231 			wm_put_swflag_ich8lan(sc);
   4232 		}
   4233 
   4234 		break;
   4235 	default:
   4236 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   4237 		CSR_WRITE_FLUSH(sc);
   4238 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   4239 		CSR_WRITE_FLUSH(sc);
   4240 		break;
   4241 	}
   4242 }
   4243 
   4244 /*
   4245  * wm_mchash:
   4246  *
    4247  *	Compute the hash of the multicast address for the 4096-bit
    4248  *	multicast filter (1024-bit on the ICH/PCH variants).
   4249  */
   4250 static uint32_t
   4251 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   4252 {
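         	/*
         	 * The shift tables select which window of the upper address
         	 * bits forms the hash; sc_mchash_type indexes the entry used
         	 * for this chip's filter configuration.
         	 */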
   4253 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   4254 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   4255 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   4256 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   4257 	uint32_t hash;
   4258 
   4259 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4260 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4261 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
    4262 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
   4263 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   4264 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   4265 		return (hash & 0x3ff);
   4266 	}
   4267 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   4268 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   4269 
   4270 	return (hash & 0xfff);
   4271 }
   4272 
    4273 /*
    4274  * wm_rar_count:
    4275  *	Return the number of usable receive address (RAL/RAR) entries.
    4276  */
   4277 static int
   4278 wm_rar_count(struct wm_softc *sc)
   4279 {
   4280 	int size;
   4281 
   4282 	switch (sc->sc_type) {
   4283 	case WM_T_ICH8:
    4284 		size = WM_RAL_TABSIZE_ICH8 - 1;
   4285 		break;
   4286 	case WM_T_ICH9:
   4287 	case WM_T_ICH10:
   4288 	case WM_T_PCH:
   4289 		size = WM_RAL_TABSIZE_ICH8;
   4290 		break;
   4291 	case WM_T_PCH2:
   4292 		size = WM_RAL_TABSIZE_PCH2;
   4293 		break;
   4294 	case WM_T_PCH_LPT:
   4295 	case WM_T_PCH_SPT:
   4296 	case WM_T_PCH_CNP:
   4297 		size = WM_RAL_TABSIZE_PCH_LPT;
   4298 		break;
   4299 	case WM_T_82575:
   4300 	case WM_T_I210:
   4301 	case WM_T_I211:
   4302 		size = WM_RAL_TABSIZE_82575;
   4303 		break;
   4304 	case WM_T_82576:
   4305 	case WM_T_82580:
   4306 		size = WM_RAL_TABSIZE_82576;
   4307 		break;
   4308 	case WM_T_I350:
   4309 	case WM_T_I354:
   4310 		size = WM_RAL_TABSIZE_I350;
   4311 		break;
   4312 	default:
   4313 		size = WM_RAL_TABSIZE;
   4314 	}
   4315 
   4316 	return size;
   4317 }
   4318 
   4319 /*
   4320  * wm_set_filter:
   4321  *
   4322  *	Set up the receive filter.
   4323  */
   4324 static void
   4325 wm_set_filter(struct wm_softc *sc)
   4326 {
   4327 	struct ethercom *ec = &sc->sc_ethercom;
   4328 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   4329 	struct ether_multi *enm;
   4330 	struct ether_multistep step;
   4331 	bus_addr_t mta_reg;
   4332 	uint32_t hash, reg, bit;
   4333 	int i, size, ralmax, rv;
   4334 
   4335 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4336 		device_xname(sc->sc_dev), __func__));
   4337 	KASSERT(WM_CORE_LOCKED(sc));
   4338 
   4339 	if (sc->sc_type >= WM_T_82544)
   4340 		mta_reg = WMREG_CORDOVA_MTA;
   4341 	else
   4342 		mta_reg = WMREG_MTA;
   4343 
   4344 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   4345 
   4346 	if (sc->sc_if_flags & IFF_BROADCAST)
   4347 		sc->sc_rctl |= RCTL_BAM;
   4348 	if (sc->sc_if_flags & IFF_PROMISC) {
   4349 		sc->sc_rctl |= RCTL_UPE;
   4350 		ETHER_LOCK(ec);
   4351 		ec->ec_flags |= ETHER_F_ALLMULTI;
   4352 		ETHER_UNLOCK(ec);
   4353 		goto allmulti;
   4354 	}
   4355 
   4356 	/*
   4357 	 * Set the station address in the first RAL slot, and
   4358 	 * clear the remaining slots.
   4359 	 */
   4360 	size = wm_rar_count(sc);
   4361 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   4362 
   4363 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   4364 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   4365 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   4366 		switch (i) {
   4367 		case 0:
   4368 			/* We can use all entries */
   4369 			ralmax = size;
   4370 			break;
   4371 		case 1:
   4372 			/* Only RAR[0] */
   4373 			ralmax = 1;
   4374 			break;
   4375 		default:
   4376 			/* Available SHRA + RAR[0] */
   4377 			ralmax = i + 1;
   4378 		}
   4379 	} else
   4380 		ralmax = size;
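         	/*
         	 * Clear the remaining slots.  Entries at or above ralmax are
         	 * protected by WLOCK_MAC (see above) and are left untouched.
         	 */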
   4381 	for (i = 1; i < size; i++) {
   4382 		if (i < ralmax)
   4383 			wm_set_ral(sc, NULL, i);
   4384 	}
   4385 
   4386 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4387 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4388 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4389 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4390 		size = WM_ICH8_MC_TABSIZE;
   4391 	else
   4392 		size = WM_MC_TABSIZE;
   4393 	/* Clear out the multicast table. */
   4394 	for (i = 0; i < size; i++) {
   4395 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4396 		CSR_WRITE_FLUSH(sc);
   4397 	}
   4398 
   4399 	ETHER_LOCK(ec);
   4400 	ETHER_FIRST_MULTI(step, ec, enm);
   4401 	while (enm != NULL) {
   4402 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4403 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4404 			ETHER_UNLOCK(ec);
   4405 			/*
   4406 			 * We must listen to a range of multicast addresses.
   4407 			 * For now, just accept all multicasts, rather than
   4408 			 * trying to set only those filter bits needed to match
   4409 			 * the range.  (At this time, the only use of address
   4410 			 * ranges is for IP multicast routing, for which the
   4411 			 * range is big enough to require all bits set.)
   4412 			 */
   4413 			goto allmulti;
   4414 		}
   4415 
   4416 		hash = wm_mchash(sc, enm->enm_addrlo);
   4417 
   4418 		reg = (hash >> 5);
   4419 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4420 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4421 		    || (sc->sc_type == WM_T_PCH2)
   4422 		    || (sc->sc_type == WM_T_PCH_LPT)
   4423 		    || (sc->sc_type == WM_T_PCH_SPT)
   4424 		    || (sc->sc_type == WM_T_PCH_CNP))
   4425 			reg &= 0x1f;
   4426 		else
   4427 			reg &= 0x7f;
   4428 		bit = hash & 0x1f;
   4429 
   4430 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4431 		hash |= 1U << bit;
   4432 
   4433 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4434 			/*
    4435 			 * 82544 Errata 9: Certain registers cannot be
    4436 			 * written with particular alignments in PCI-X bus
    4437 			 * operation (FCAH, MTA and VFTA).
   4438 			 */
   4439 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4440 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4441 			CSR_WRITE_FLUSH(sc);
   4442 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4443 			CSR_WRITE_FLUSH(sc);
   4444 		} else {
   4445 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4446 			CSR_WRITE_FLUSH(sc);
   4447 		}
   4448 
   4449 		ETHER_NEXT_MULTI(step, enm);
   4450 	}
   4451 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4452 	ETHER_UNLOCK(ec);
   4453 
   4454 	goto setit;
   4455 
   4456  allmulti:
   4457 	sc->sc_rctl |= RCTL_MPE;
   4458 
   4459  setit:
   4460 	if (sc->sc_type >= WM_T_PCH2) {
   4461 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4462 		    && (ifp->if_mtu > ETHERMTU))
   4463 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4464 		else
   4465 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4466 		if (rv != 0)
   4467 			device_printf(sc->sc_dev,
   4468 			    "Failed to do workaround for jumbo frame.\n");
   4469 	}
   4470 
   4471 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4472 }
   4473 
   4474 /* Reset and init related */
   4475 
   4476 static void
   4477 wm_set_vlan(struct wm_softc *sc)
   4478 {
   4479 
   4480 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4481 		device_xname(sc->sc_dev), __func__));
   4482 
   4483 	/* Deal with VLAN enables. */
   4484 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4485 		sc->sc_ctrl |= CTRL_VME;
   4486 	else
   4487 		sc->sc_ctrl &= ~CTRL_VME;
   4488 
   4489 	/* Write the control registers. */
   4490 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4491 }
   4492 
   4493 static void
   4494 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4495 {
   4496 	uint32_t gcr;
   4497 	pcireg_t ctrl2;
   4498 
   4499 	gcr = CSR_READ(sc, WMREG_GCR);
   4500 
    4501 	/* Only take action if the timeout value is at its default of 0 */
   4502 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4503 		goto out;
   4504 
   4505 	if ((gcr & GCR_CAP_VER2) == 0) {
   4506 		gcr |= GCR_CMPL_TMOUT_10MS;
   4507 		goto out;
   4508 	}
   4509 
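         	/*
         	 * Completion timeout capability version 2: set a 16 ms timeout
         	 * via the PCIe Device Control 2 register instead.
         	 */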
   4510 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4511 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4512 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4513 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4514 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4515 
   4516 out:
   4517 	/* Disable completion timeout resend */
   4518 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4519 
   4520 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4521 }
   4522 
   4523 void
   4524 wm_get_auto_rd_done(struct wm_softc *sc)
   4525 {
   4526 	int i;
   4527 
    4528 	/* Wait for eeprom to reload */
   4529 	switch (sc->sc_type) {
   4530 	case WM_T_82571:
   4531 	case WM_T_82572:
   4532 	case WM_T_82573:
   4533 	case WM_T_82574:
   4534 	case WM_T_82583:
   4535 	case WM_T_82575:
   4536 	case WM_T_82576:
   4537 	case WM_T_82580:
   4538 	case WM_T_I350:
   4539 	case WM_T_I354:
   4540 	case WM_T_I210:
   4541 	case WM_T_I211:
   4542 	case WM_T_80003:
   4543 	case WM_T_ICH8:
   4544 	case WM_T_ICH9:
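         		/* Poll EECD up to 10 times at 1 ms intervals. */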
   4545 		for (i = 0; i < 10; i++) {
   4546 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4547 				break;
   4548 			delay(1000);
   4549 		}
   4550 		if (i == 10) {
   4551 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4552 			    "complete\n", device_xname(sc->sc_dev));
   4553 		}
   4554 		break;
   4555 	default:
   4556 		break;
   4557 	}
   4558 }
   4559 
   4560 void
   4561 wm_lan_init_done(struct wm_softc *sc)
   4562 {
   4563 	uint32_t reg = 0;
   4564 	int i;
   4565 
   4566 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4567 		device_xname(sc->sc_dev), __func__));
   4568 
   4569 	/* Wait for eeprom to reload */
   4570 	switch (sc->sc_type) {
   4571 	case WM_T_ICH10:
   4572 	case WM_T_PCH:
   4573 	case WM_T_PCH2:
   4574 	case WM_T_PCH_LPT:
   4575 	case WM_T_PCH_SPT:
   4576 	case WM_T_PCH_CNP:
   4577 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4578 			reg = CSR_READ(sc, WMREG_STATUS);
   4579 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4580 				break;
   4581 			delay(100);
   4582 		}
   4583 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4584 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4585 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4586 		}
   4587 		break;
   4588 	default:
   4589 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4590 		    __func__);
   4591 		break;
   4592 	}
   4593 
   4594 	reg &= ~STATUS_LAN_INIT_DONE;
   4595 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4596 }
   4597 
   4598 void
   4599 wm_get_cfg_done(struct wm_softc *sc)
   4600 {
   4601 	int mask;
   4602 	uint32_t reg;
   4603 	int i;
   4604 
   4605 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4606 		device_xname(sc->sc_dev), __func__));
   4607 
   4608 	/* Wait for eeprom to reload */
   4609 	switch (sc->sc_type) {
   4610 	case WM_T_82542_2_0:
   4611 	case WM_T_82542_2_1:
   4612 		/* null */
   4613 		break;
   4614 	case WM_T_82543:
   4615 	case WM_T_82544:
   4616 	case WM_T_82540:
   4617 	case WM_T_82545:
   4618 	case WM_T_82545_3:
   4619 	case WM_T_82546:
   4620 	case WM_T_82546_3:
   4621 	case WM_T_82541:
   4622 	case WM_T_82541_2:
   4623 	case WM_T_82547:
   4624 	case WM_T_82547_2:
   4625 	case WM_T_82573:
   4626 	case WM_T_82574:
   4627 	case WM_T_82583:
   4628 		/* generic */
   4629 		delay(10*1000);
   4630 		break;
   4631 	case WM_T_80003:
   4632 	case WM_T_82571:
   4633 	case WM_T_82572:
   4634 	case WM_T_82575:
   4635 	case WM_T_82576:
   4636 	case WM_T_82580:
   4637 	case WM_T_I350:
   4638 	case WM_T_I354:
   4639 	case WM_T_I210:
   4640 	case WM_T_I211:
   4641 		if (sc->sc_type == WM_T_82571) {
   4642 			/* Only 82571 shares port 0 */
   4643 			mask = EEMNGCTL_CFGDONE_0;
   4644 		} else
   4645 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4646 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4647 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4648 				break;
   4649 			delay(1000);
   4650 		}
   4651 		if (i >= WM_PHY_CFG_TIMEOUT)
   4652 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4653 				device_xname(sc->sc_dev), __func__));
   4654 		break;
   4655 	case WM_T_ICH8:
   4656 	case WM_T_ICH9:
   4657 	case WM_T_ICH10:
   4658 	case WM_T_PCH:
   4659 	case WM_T_PCH2:
   4660 	case WM_T_PCH_LPT:
   4661 	case WM_T_PCH_SPT:
   4662 	case WM_T_PCH_CNP:
   4663 		delay(10*1000);
   4664 		if (sc->sc_type >= WM_T_ICH10)
   4665 			wm_lan_init_done(sc);
   4666 		else
   4667 			wm_get_auto_rd_done(sc);
   4668 
   4669 		/* Clear PHY Reset Asserted bit */
   4670 		reg = CSR_READ(sc, WMREG_STATUS);
   4671 		if ((reg & STATUS_PHYRA) != 0)
   4672 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4673 		break;
   4674 	default:
   4675 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4676 		    __func__);
   4677 		break;
   4678 	}
   4679 }
   4680 
   4681 int
   4682 wm_phy_post_reset(struct wm_softc *sc)
   4683 {
   4684 	device_t dev = sc->sc_dev;
   4685 	uint16_t reg;
   4686 	int rv = 0;
   4687 
   4688 	/* This function is only for ICH8 and newer. */
   4689 	if (sc->sc_type < WM_T_ICH8)
   4690 		return 0;
   4691 
   4692 	if (wm_phy_resetisblocked(sc)) {
   4693 		/* XXX */
   4694 		device_printf(dev, "PHY is blocked\n");
   4695 		return -1;
   4696 	}
   4697 
   4698 	/* Allow time for h/w to get to quiescent state after reset */
   4699 	delay(10*1000);
   4700 
   4701 	/* Perform any necessary post-reset workarounds */
   4702 	if (sc->sc_type == WM_T_PCH)
   4703 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4704 	else if (sc->sc_type == WM_T_PCH2)
   4705 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4706 	if (rv != 0)
   4707 		return rv;
   4708 
   4709 	/* Clear the host wakeup bit after lcd reset */
   4710 	if (sc->sc_type >= WM_T_PCH) {
   4711 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4712 		reg &= ~BM_WUC_HOST_WU_BIT;
   4713 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4714 	}
   4715 
   4716 	/* Configure the LCD with the extended configuration region in NVM */
   4717 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4718 		return rv;
   4719 
   4720 	/* Configure the LCD with the OEM bits in NVM */
   4721 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4722 
   4723 	if (sc->sc_type == WM_T_PCH2) {
   4724 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4725 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4726 			delay(10 * 1000);
   4727 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4728 		}
   4729 		/* Set EEE LPI Update Timer to 200usec */
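         		/* 0x1387 = 4999; apparently ~40 ns units, giving ~200 us. */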
   4730 		rv = sc->phy.acquire(sc);
   4731 		if (rv)
   4732 			return rv;
   4733 		rv = wm_write_emi_reg_locked(dev,
   4734 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4735 		sc->phy.release(sc);
   4736 	}
   4737 
   4738 	return rv;
   4739 }
   4740 
   4741 /* Only for PCH and newer */
   4742 static int
   4743 wm_write_smbus_addr(struct wm_softc *sc)
   4744 {
   4745 	uint32_t strap, freq;
   4746 	uint16_t phy_data;
   4747 	int rv;
   4748 
   4749 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4750 		device_xname(sc->sc_dev), __func__));
   4751 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4752 
   4753 	strap = CSR_READ(sc, WMREG_STRAP);
   4754 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4755 
   4756 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4757 	if (rv != 0)
   4758 		return rv;
   4759 
   4760 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4761 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4762 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4763 
   4764 	if (sc->sc_phytype == WMPHY_I217) {
   4765 		/* Restore SMBus frequency */
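         		/* A strap frequency value of 0 means it is unsupported. */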
    4766 		if (freq--) {
   4767 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4768 			    | HV_SMB_ADDR_FREQ_HIGH);
   4769 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4770 			    HV_SMB_ADDR_FREQ_LOW);
   4771 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4772 			    HV_SMB_ADDR_FREQ_HIGH);
   4773 		} else
   4774 			DPRINTF(sc, WM_DEBUG_INIT,
   4775 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4776 				device_xname(sc->sc_dev), __func__));
   4777 	}
   4778 
   4779 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4780 	    phy_data);
   4781 }
   4782 
   4783 static int
   4784 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4785 {
   4786 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4787 	uint16_t phy_page = 0;
   4788 	int rv = 0;
   4789 
   4790 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4791 		device_xname(sc->sc_dev), __func__));
   4792 
   4793 	switch (sc->sc_type) {
   4794 	case WM_T_ICH8:
   4795 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4796 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4797 			return 0;
   4798 
   4799 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4800 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4801 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4802 			break;
   4803 		}
   4804 		/* FALLTHROUGH */
   4805 	case WM_T_PCH:
   4806 	case WM_T_PCH2:
   4807 	case WM_T_PCH_LPT:
   4808 	case WM_T_PCH_SPT:
   4809 	case WM_T_PCH_CNP:
   4810 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4811 		break;
   4812 	default:
   4813 		return 0;
   4814 	}
   4815 
   4816 	if ((rv = sc->phy.acquire(sc)) != 0)
   4817 		return rv;
   4818 
   4819 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4820 	if ((reg & sw_cfg_mask) == 0)
   4821 		goto release;
   4822 
   4823 	/*
   4824 	 * Make sure HW does not configure LCD from PHY extended configuration
   4825 	 * before SW configuration
   4826 	 */
   4827 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4828 	if ((sc->sc_type < WM_T_PCH2)
   4829 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4830 		goto release;
   4831 
   4832 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4833 		device_xname(sc->sc_dev), __func__));
   4834 	/* word_addr is in DWORD */
   4835 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4836 
   4837 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4838 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4839 	if (cnf_size == 0)
   4840 		goto release;
   4841 
   4842 	if (((sc->sc_type == WM_T_PCH)
   4843 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4844 	    || (sc->sc_type > WM_T_PCH)) {
   4845 		/*
   4846 		 * HW configures the SMBus address and LEDs when the OEM and
   4847 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4848 		 * are cleared, SW will configure them instead.
   4849 		 */
   4850 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4851 			device_xname(sc->sc_dev), __func__));
   4852 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4853 			goto release;
   4854 
   4855 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4856 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4857 		    (uint16_t)reg);
   4858 		if (rv != 0)
   4859 			goto release;
   4860 	}
   4861 
   4862 	/* Configure LCD from extended configuration region. */
   4863 	for (i = 0; i < cnf_size; i++) {
   4864 		uint16_t reg_data, reg_addr;
   4865 
   4866 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4867 			goto release;
   4868 
    4869 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4870 			goto release;
   4871 
   4872 		if (reg_addr == IGPHY_PAGE_SELECT)
   4873 			phy_page = reg_data;
   4874 
   4875 		reg_addr &= IGPHY_MAXREGADDR;
   4876 		reg_addr |= phy_page;
   4877 
   4878 		KASSERT(sc->phy.writereg_locked != NULL);
   4879 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4880 		    reg_data);
   4881 	}
   4882 
   4883 release:
   4884 	sc->phy.release(sc);
   4885 	return rv;
   4886 }
   4887 
   4888 /*
   4889  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4890  *  @sc:       pointer to the HW structure
    4891  *  @d0_state: boolean indicating whether entering the D0 or D3 device state
   4892  *
   4893  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4894  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4895  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4896  */
   4897 int
   4898 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4899 {
   4900 	uint32_t mac_reg;
   4901 	uint16_t oem_reg;
   4902 	int rv;
   4903 
   4904 	if (sc->sc_type < WM_T_PCH)
   4905 		return 0;
   4906 
   4907 	rv = sc->phy.acquire(sc);
   4908 	if (rv != 0)
   4909 		return rv;
   4910 
   4911 	if (sc->sc_type == WM_T_PCH) {
   4912 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4913 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4914 			goto release;
   4915 	}
   4916 
   4917 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4918 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4919 		goto release;
   4920 
   4921 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4922 
   4923 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4924 	if (rv != 0)
   4925 		goto release;
   4926 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4927 
   4928 	if (d0_state) {
   4929 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4930 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4931 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4932 			oem_reg |= HV_OEM_BITS_LPLU;
   4933 	} else {
   4934 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4935 		    != 0)
   4936 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4937 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4938 		    != 0)
   4939 			oem_reg |= HV_OEM_BITS_LPLU;
   4940 	}
   4941 
   4942 	/* Set Restart auto-neg to activate the bits */
   4943 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4944 	    && (wm_phy_resetisblocked(sc) == false))
   4945 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4946 
   4947 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4948 
   4949 release:
   4950 	sc->phy.release(sc);
   4951 
   4952 	return rv;
   4953 }
   4954 
   4955 /* Init hardware bits */
   4956 void
   4957 wm_initialize_hardware_bits(struct wm_softc *sc)
   4958 {
   4959 	uint32_t tarc0, tarc1, reg;
   4960 
   4961 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4962 		device_xname(sc->sc_dev), __func__));
   4963 
   4964 	/* For 82571 variant, 80003 and ICHs */
   4965 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4966 	    || (sc->sc_type >= WM_T_80003)) {
   4967 
   4968 		/* Transmit Descriptor Control 0 */
   4969 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4970 		reg |= TXDCTL_COUNT_DESC;
   4971 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4972 
   4973 		/* Transmit Descriptor Control 1 */
   4974 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4975 		reg |= TXDCTL_COUNT_DESC;
   4976 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4977 
   4978 		/* TARC0 */
   4979 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4980 		switch (sc->sc_type) {
   4981 		case WM_T_82571:
   4982 		case WM_T_82572:
   4983 		case WM_T_82573:
   4984 		case WM_T_82574:
   4985 		case WM_T_82583:
   4986 		case WM_T_80003:
   4987 			/* Clear bits 30..27 */
   4988 			tarc0 &= ~__BITS(30, 27);
   4989 			break;
   4990 		default:
   4991 			break;
   4992 		}
   4993 
   4994 		switch (sc->sc_type) {
   4995 		case WM_T_82571:
   4996 		case WM_T_82572:
   4997 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4998 
   4999 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5000 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   5001 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   5002 			/* 8257[12] Errata No.7 */
    5003 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   5004 
   5005 			/* TARC1 bit 28 */
   5006 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5007 				tarc1 &= ~__BIT(28);
   5008 			else
   5009 				tarc1 |= __BIT(28);
   5010 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5011 
   5012 			/*
   5013 			 * 8257[12] Errata No.13
    5014 			 * Disable Dynamic Clock Gating.
   5015 			 */
   5016 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5017 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   5018 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5019 			break;
   5020 		case WM_T_82573:
   5021 		case WM_T_82574:
   5022 		case WM_T_82583:
   5023 			if ((sc->sc_type == WM_T_82574)
   5024 			    || (sc->sc_type == WM_T_82583))
   5025 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   5026 
   5027 			/* Extended Device Control */
   5028 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5029 			reg &= ~__BIT(23);	/* Clear bit 23 */
   5030 			reg |= __BIT(22);	/* Set bit 22 */
   5031 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5032 
   5033 			/* Device Control */
   5034 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   5035 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5036 
   5037 			/* PCIe Control Register */
   5038 			/*
   5039 			 * 82573 Errata (unknown).
   5040 			 *
   5041 			 * 82574 Errata 25 and 82583 Errata 12
   5042 			 * "Dropped Rx Packets":
    5043 			 *   NVM Image Version 2.1.4 and newer do not have this bug.
   5044 			 */
   5045 			reg = CSR_READ(sc, WMREG_GCR);
   5046 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   5047 			CSR_WRITE(sc, WMREG_GCR, reg);
   5048 
   5049 			if ((sc->sc_type == WM_T_82574)
   5050 			    || (sc->sc_type == WM_T_82583)) {
   5051 				/*
   5052 				 * Document says this bit must be set for
   5053 				 * proper operation.
   5054 				 */
   5055 				reg = CSR_READ(sc, WMREG_GCR);
   5056 				reg |= __BIT(22);
   5057 				CSR_WRITE(sc, WMREG_GCR, reg);
   5058 
   5059 				/*
    5060 				 * Apply a workaround for the hardware errata
    5061 				 * documented in the errata docs. It fixes an
    5062 				 * issue where error-prone or unreliable PCIe
    5063 				 * completions occur, particularly with ASPM
    5064 				 * enabled. Without the fix, the issue can
    5065 				 * cause Tx timeouts.
   5066 				 */
   5067 				reg = CSR_READ(sc, WMREG_GCR2);
   5068 				reg |= __BIT(0);
   5069 				CSR_WRITE(sc, WMREG_GCR2, reg);
   5070 			}
   5071 			break;
   5072 		case WM_T_80003:
   5073 			/* TARC0 */
   5074 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   5075 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    5076 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   5077 
   5078 			/* TARC1 bit 28 */
   5079 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5080 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5081 				tarc1 &= ~__BIT(28);
   5082 			else
   5083 				tarc1 |= __BIT(28);
   5084 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5085 			break;
   5086 		case WM_T_ICH8:
   5087 		case WM_T_ICH9:
   5088 		case WM_T_ICH10:
   5089 		case WM_T_PCH:
   5090 		case WM_T_PCH2:
   5091 		case WM_T_PCH_LPT:
   5092 		case WM_T_PCH_SPT:
   5093 		case WM_T_PCH_CNP:
   5094 			/* TARC0 */
   5095 			if (sc->sc_type == WM_T_ICH8) {
   5096 				/* Set TARC0 bits 29 and 28 */
   5097 				tarc0 |= __BITS(29, 28);
   5098 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   5099 				tarc0 |= __BIT(29);
   5100 				/*
    5101 				 * Drop bit 28. From Linux.
    5102 				 * See the I218/I219 spec update,
   5103 				 * "5. Buffer Overrun While the I219 is
   5104 				 * Processing DMA Transactions"
   5105 				 */
   5106 				tarc0 &= ~__BIT(28);
   5107 			}
   5108 			/* Set TARC0 bits 23,24,26,27 */
   5109 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   5110 
   5111 			/* CTRL_EXT */
   5112 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5113 			reg |= __BIT(22);	/* Set bit 22 */
   5114 			/*
   5115 			 * Enable PHY low-power state when MAC is at D3
   5116 			 * w/o WoL
   5117 			 */
   5118 			if (sc->sc_type >= WM_T_PCH)
   5119 				reg |= CTRL_EXT_PHYPDEN;
   5120 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5121 
   5122 			/* TARC1 */
   5123 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   5124 			/* bit 28 */
   5125 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   5126 				tarc1 &= ~__BIT(28);
   5127 			else
   5128 				tarc1 |= __BIT(28);
   5129 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   5130 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   5131 
   5132 			/* Device Status */
   5133 			if (sc->sc_type == WM_T_ICH8) {
   5134 				reg = CSR_READ(sc, WMREG_STATUS);
   5135 				reg &= ~__BIT(31);
   5136 				CSR_WRITE(sc, WMREG_STATUS, reg);
   5137 
   5138 			}
   5139 
   5140 			/* IOSFPC */
   5141 			if (sc->sc_type == WM_T_PCH_SPT) {
   5142 				reg = CSR_READ(sc, WMREG_IOSFPC);
    5143 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   5144 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   5145 			}
   5146 			/*
    5147 			 * To work around a descriptor data corruption issue
    5148 			 * seen with NFS v2 UDP traffic, just disable the NFS
    5149 			 * filtering capability.
   5150 			 */
   5151 			reg = CSR_READ(sc, WMREG_RFCTL);
   5152 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   5153 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5154 			break;
   5155 		default:
   5156 			break;
   5157 		}
   5158 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   5159 
   5160 		switch (sc->sc_type) {
   5161 		/*
   5162 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   5163 		 * Avoid RSS Hash Value bug.
   5164 		 */
   5165 		case WM_T_82571:
   5166 		case WM_T_82572:
   5167 		case WM_T_82573:
   5168 		case WM_T_80003:
   5169 		case WM_T_ICH8:
   5170 			reg = CSR_READ(sc, WMREG_RFCTL);
    5171 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   5172 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5173 			break;
   5174 		case WM_T_82574:
    5175 			/* Use extended Rx descriptors. */
   5176 			reg = CSR_READ(sc, WMREG_RFCTL);
   5177 			reg |= WMREG_RFCTL_EXSTEN;
   5178 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5179 			break;
   5180 		default:
   5181 			break;
   5182 		}
   5183 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   5184 		/*
   5185 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   5186 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   5187 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   5188 		 * Correctly by the Device"
   5189 		 *
   5190 		 * I354(C2000) Errata AVR53:
   5191 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   5192 		 * Hang"
   5193 		 */
   5194 		reg = CSR_READ(sc, WMREG_RFCTL);
   5195 		reg |= WMREG_RFCTL_IPV6EXDIS;
   5196 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   5197 	}
   5198 }
   5199 
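         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Translate the encoded RXPBS register value into a packet buffer
          *	size using the 82580 lookup table.
          */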
   5200 static uint32_t
   5201 wm_rxpbs_adjust_82580(uint32_t val)
   5202 {
   5203 	uint32_t rv = 0;
   5204 
   5205 	if (val < __arraycount(wm_82580_rxpbs_table))
   5206 		rv = wm_82580_rxpbs_table[val];
   5207 
   5208 	return rv;
   5209 }
   5210 
   5211 /*
   5212  * wm_reset_phy:
   5213  *
   5214  *	generic PHY reset function.
   5215  *	Same as e1000_phy_hw_reset_generic()
   5216  */
   5217 static int
   5218 wm_reset_phy(struct wm_softc *sc)
   5219 {
   5220 	uint32_t reg;
   5221 
   5222 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5223 		device_xname(sc->sc_dev), __func__));
   5224 	if (wm_phy_resetisblocked(sc))
   5225 		return -1;
   5226 
   5227 	sc->phy.acquire(sc);
   5228 
   5229 	reg = CSR_READ(sc, WMREG_CTRL);
   5230 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   5231 	CSR_WRITE_FLUSH(sc);
   5232 
   5233 	delay(sc->phy.reset_delay_us);
   5234 
   5235 	CSR_WRITE(sc, WMREG_CTRL, reg);
   5236 	CSR_WRITE_FLUSH(sc);
   5237 
   5238 	delay(150);
   5239 
   5240 	sc->phy.release(sc);
   5241 
   5242 	wm_get_cfg_done(sc);
   5243 	wm_phy_post_reset(sc);
   5244 
   5245 	return 0;
   5246 }
   5247 
   5248 /*
   5249  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   5250  *
   5251  * In i219, the descriptor rings must be emptied before resetting the HW
   5252  * or before changing the device state to D3 during runtime (runtime PM).
   5253  *
   5254  * Failure to do this will cause the HW to enter a unit hang state which can
   5255  * only be released by PCI reset on the device.
   5256  *
    5257  * I219 does not use multiqueue, so checking sc->sc_queue[0] is sufficient.
   5258  */
   5259 static void
   5260 wm_flush_desc_rings(struct wm_softc *sc)
   5261 {
   5262 	pcireg_t preg;
   5263 	uint32_t reg;
   5264 	struct wm_txqueue *txq;
   5265 	wiseman_txdesc_t *txd;
   5266 	int nexttx;
   5267 	uint32_t rctl;
   5268 
   5269 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   5270 
   5271 	/* First, disable MULR fix in FEXTNVM11 */
   5272 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5273 	reg |= FEXTNVM11_DIS_MULRFIX;
   5274 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5275 
   5276 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5277 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   5278 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   5279 		return;
   5280 
   5281 	/*
   5282 	 * Remove all descriptors from the tx_ring.
   5283 	 *
    5284 	 * We want to clear all pending descriptors from the TX ring. Zeroing
    5285 	 * happens when the HW reads the regs. We assign the ring itself as
    5286 	 * the data of the next descriptor. We don't care about the data;
    5287 	 * we are about to reset the HW.
   5288 	 */
   5289 #ifdef WM_DEBUG
   5290 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   5291 #endif
   5292 	reg = CSR_READ(sc, WMREG_TCTL);
   5293 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   5294 
   5295 	txq = &sc->sc_queue[0].wmq_txq;
   5296 	nexttx = txq->txq_next;
   5297 	txd = &txq->txq_descs[nexttx];
   5298 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
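         	/* A 512-byte dummy transfer is enough to flush the ring. */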
   5299 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   5300 	txd->wtx_fields.wtxu_status = 0;
   5301 	txd->wtx_fields.wtxu_options = 0;
   5302 	txd->wtx_fields.wtxu_vlan = 0;
   5303 
   5304 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5305 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5306 
   5307 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5308 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   5309 	CSR_WRITE_FLUSH(sc);
   5310 	delay(250);
   5311 
   5312 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   5313 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   5314 		return;
   5315 
   5316 	/*
   5317 	 * Mark all descriptors in the RX ring as consumed and disable the
   5318 	 * rx ring.
   5319 	 */
   5320 #ifdef WM_DEBUG
   5321 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   5322 #endif
   5323 	rctl = CSR_READ(sc, WMREG_RCTL);
   5324 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5325 	CSR_WRITE_FLUSH(sc);
   5326 	delay(150);
   5327 
   5328 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   5329 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   5330 	reg &= 0xffffc000;
   5331 	/*
   5332 	 * Update thresholds: prefetch threshold to 31, host threshold
   5333 	 * to 1 and make sure the granularity is "descriptors" and not
   5334 	 * "cache lines"
   5335 	 */
   5336 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   5337 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   5338 
   5339 	/* Momentarily enable the RX ring for the changes to take effect */
   5340 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   5341 	CSR_WRITE_FLUSH(sc);
   5342 	delay(150);
   5343 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   5344 }
   5345 
   5346 /*
   5347  * wm_reset:
   5348  *
    5349  *	Reset the chip.
   5350  */
   5351 static void
   5352 wm_reset(struct wm_softc *sc)
   5353 {
   5354 	int phy_reset = 0;
   5355 	int i, error = 0;
   5356 	uint32_t reg;
   5357 	uint16_t kmreg;
   5358 	int rv;
   5359 
   5360 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5361 		device_xname(sc->sc_dev), __func__));
   5362 	KASSERT(sc->sc_type != 0);
   5363 
   5364 	/*
   5365 	 * Allocate on-chip memory according to the MTU size.
   5366 	 * The Packet Buffer Allocation register must be written
   5367 	 * before the chip is reset.
   5368 	 */
   5369 	switch (sc->sc_type) {
   5370 	case WM_T_82547:
   5371 	case WM_T_82547_2:
   5372 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5373 		    PBA_22K : PBA_30K;
   5374 		for (i = 0; i < sc->sc_nqueues; i++) {
   5375 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5376 			txq->txq_fifo_head = 0;
   5377 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   5378 			txq->txq_fifo_size =
   5379 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5380 			txq->txq_fifo_stall = 0;
   5381 		}
   5382 		break;
   5383 	case WM_T_82571:
   5384 	case WM_T_82572:
    5385 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5386 	case WM_T_80003:
   5387 		sc->sc_pba = PBA_32K;
   5388 		break;
   5389 	case WM_T_82573:
   5390 		sc->sc_pba = PBA_12K;
   5391 		break;
   5392 	case WM_T_82574:
   5393 	case WM_T_82583:
   5394 		sc->sc_pba = PBA_20K;
   5395 		break;
   5396 	case WM_T_82576:
   5397 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5398 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5399 		break;
   5400 	case WM_T_82580:
   5401 	case WM_T_I350:
   5402 	case WM_T_I354:
   5403 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5404 		break;
   5405 	case WM_T_I210:
   5406 	case WM_T_I211:
   5407 		sc->sc_pba = PBA_34K;
   5408 		break;
   5409 	case WM_T_ICH8:
   5410 		/* Workaround for a bit corruption issue in FIFO memory */
   5411 		sc->sc_pba = PBA_8K;
   5412 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5413 		break;
   5414 	case WM_T_ICH9:
   5415 	case WM_T_ICH10:
   5416 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5417 		    PBA_14K : PBA_10K;
   5418 		break;
   5419 	case WM_T_PCH:
   5420 	case WM_T_PCH2:	/* XXX 14K? */
   5421 	case WM_T_PCH_LPT:
   5422 	case WM_T_PCH_SPT:
   5423 	case WM_T_PCH_CNP:
   5424 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5425 		    PBA_12K : PBA_26K;
   5426 		break;
   5427 	default:
   5428 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5429 		    PBA_40K : PBA_48K;
   5430 		break;
   5431 	}
   5432 	/*
    5433 	 * Only old or non-multiqueue devices have the PBA register.
   5434 	 * XXX Need special handling for 82575.
   5435 	 */
   5436 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5437 	    || (sc->sc_type == WM_T_82575))
   5438 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5439 
   5440 	/* Prevent the PCI-E bus from sticking */
   5441 	if (sc->sc_flags & WM_F_PCIE) {
   5442 		int timeout = 800;
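         		/* 800 iterations * 100 us = up to 80 ms. */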
   5443 
   5444 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5445 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5446 
   5447 		while (timeout--) {
   5448 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5449 			    == 0)
   5450 				break;
   5451 			delay(100);
   5452 		}
   5453 		if (timeout == 0)
   5454 			device_printf(sc->sc_dev,
   5455 			    "failed to disable bus mastering\n");
   5456 	}
   5457 
   5458 	/* Set the completion timeout for interface */
   5459 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5460 	    || (sc->sc_type == WM_T_82580)
   5461 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5462 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5463 		wm_set_pcie_completion_timeout(sc);
   5464 
   5465 	/* Clear interrupt */
   5466 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5467 	if (wm_is_using_msix(sc)) {
   5468 		if (sc->sc_type != WM_T_82574) {
   5469 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5470 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5471 		} else
   5472 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5473 	}
   5474 
   5475 	/* Stop the transmit and receive processes. */
   5476 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5477 	sc->sc_rctl &= ~RCTL_EN;
   5478 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5479 	CSR_WRITE_FLUSH(sc);
   5480 
   5481 	/* XXX set_tbi_sbp_82543() */
   5482 
   5483 	delay(10*1000);
   5484 
   5485 	/* Must acquire the MDIO ownership before MAC reset */
   5486 	switch (sc->sc_type) {
   5487 	case WM_T_82573:
   5488 	case WM_T_82574:
   5489 	case WM_T_82583:
   5490 		error = wm_get_hw_semaphore_82573(sc);
   5491 		break;
   5492 	default:
   5493 		break;
   5494 	}
   5495 
   5496 	/*
   5497 	 * 82541 Errata 29? & 82547 Errata 28?
   5498 	 * See also the description about PHY_RST bit in CTRL register
   5499 	 * in 8254x_GBe_SDM.pdf.
   5500 	 */
   5501 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5502 		CSR_WRITE(sc, WMREG_CTRL,
   5503 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5504 		CSR_WRITE_FLUSH(sc);
   5505 		delay(5000);
   5506 	}
   5507 
   5508 	switch (sc->sc_type) {
   5509 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5510 	case WM_T_82541:
   5511 	case WM_T_82541_2:
   5512 	case WM_T_82547:
   5513 	case WM_T_82547_2:
   5514 		/*
   5515 		 * On some chipsets, a reset through a memory-mapped write
   5516 		 * cycle can cause the chip to reset before completing the
   5517 		 * write cycle. This causes major headache that can be avoided
   5518 		 * by issuing the reset via indirect register writes through
   5519 		 * I/O space.
   5520 		 *
   5521 		 * So, if we successfully mapped the I/O BAR at attach time,
   5522 		 * use that. Otherwise, try our luck with a memory-mapped
   5523 		 * reset.
   5524 		 */
   5525 		if (sc->sc_flags & WM_F_IOH_VALID)
   5526 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5527 		else
   5528 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5529 		break;
   5530 	case WM_T_82545_3:
   5531 	case WM_T_82546_3:
   5532 		/* Use the shadow control register on these chips. */
   5533 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5534 		break;
   5535 	case WM_T_80003:
   5536 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5537 		sc->phy.acquire(sc);
   5538 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5539 		sc->phy.release(sc);
   5540 		break;
   5541 	case WM_T_ICH8:
   5542 	case WM_T_ICH9:
   5543 	case WM_T_ICH10:
   5544 	case WM_T_PCH:
   5545 	case WM_T_PCH2:
   5546 	case WM_T_PCH_LPT:
   5547 	case WM_T_PCH_SPT:
   5548 	case WM_T_PCH_CNP:
   5549 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5550 		if (wm_phy_resetisblocked(sc) == false) {
   5551 			/*
   5552 			 * Gate automatic PHY configuration by hardware on
   5553 			 * non-managed 82579
   5554 			 */
   5555 			if ((sc->sc_type == WM_T_PCH2)
   5556 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5557 				== 0))
   5558 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5559 
   5560 			reg |= CTRL_PHY_RESET;
   5561 			phy_reset = 1;
   5562 		} else
   5563 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5564 		sc->phy.acquire(sc);
   5565 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5566 		/* Don't insert a completion barrier during reset */
   5567 		delay(20*1000);
   5568 		mutex_exit(sc->sc_ich_phymtx);
   5569 		break;
   5570 	case WM_T_82580:
   5571 	case WM_T_I350:
   5572 	case WM_T_I354:
   5573 	case WM_T_I210:
   5574 	case WM_T_I211:
   5575 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5576 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5577 			CSR_WRITE_FLUSH(sc);
   5578 		delay(5000);
   5579 		break;
   5580 	case WM_T_82542_2_0:
   5581 	case WM_T_82542_2_1:
   5582 	case WM_T_82543:
   5583 	case WM_T_82540:
   5584 	case WM_T_82545:
   5585 	case WM_T_82546:
   5586 	case WM_T_82571:
   5587 	case WM_T_82572:
   5588 	case WM_T_82573:
   5589 	case WM_T_82574:
   5590 	case WM_T_82575:
   5591 	case WM_T_82576:
   5592 	case WM_T_82583:
   5593 	default:
   5594 		/* Everything else can safely use the documented method. */
   5595 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5596 		break;
   5597 	}
   5598 
   5599 	/* Must release the MDIO ownership after MAC reset */
   5600 	switch (sc->sc_type) {
   5601 	case WM_T_82573:
   5602 	case WM_T_82574:
   5603 	case WM_T_82583:
   5604 		if (error == 0)
   5605 			wm_put_hw_semaphore_82573(sc);
   5606 		break;
   5607 	default:
   5608 		break;
   5609 	}
   5610 
   5611 	/* Set Phy Config Counter to 50msec */
   5612 	if (sc->sc_type == WM_T_PCH2) {
   5613 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5614 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5615 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5616 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5617 	}
   5618 
   5619 	if (phy_reset != 0)
   5620 		wm_get_cfg_done(sc);
   5621 
   5622 	/* Reload EEPROM */
   5623 	switch (sc->sc_type) {
   5624 	case WM_T_82542_2_0:
   5625 	case WM_T_82542_2_1:
   5626 	case WM_T_82543:
   5627 	case WM_T_82544:
   5628 		delay(10);
   5629 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5630 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5631 		CSR_WRITE_FLUSH(sc);
   5632 		delay(2000);
   5633 		break;
   5634 	case WM_T_82540:
   5635 	case WM_T_82545:
   5636 	case WM_T_82545_3:
   5637 	case WM_T_82546:
   5638 	case WM_T_82546_3:
   5639 		delay(5*1000);
   5640 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5641 		break;
   5642 	case WM_T_82541:
   5643 	case WM_T_82541_2:
   5644 	case WM_T_82547:
   5645 	case WM_T_82547_2:
   5646 		delay(20000);
   5647 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5648 		break;
   5649 	case WM_T_82571:
   5650 	case WM_T_82572:
   5651 	case WM_T_82573:
   5652 	case WM_T_82574:
   5653 	case WM_T_82583:
   5654 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5655 			delay(10);
   5656 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5657 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5658 			CSR_WRITE_FLUSH(sc);
   5659 		}
   5660 		/* check EECD_EE_AUTORD */
   5661 		wm_get_auto_rd_done(sc);
   5662 		/*
    5663 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   5664 		 * is set.
   5665 		 */
   5666 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5667 		    || (sc->sc_type == WM_T_82583))
   5668 			delay(25*1000);
   5669 		break;
   5670 	case WM_T_82575:
   5671 	case WM_T_82576:
   5672 	case WM_T_82580:
   5673 	case WM_T_I350:
   5674 	case WM_T_I354:
   5675 	case WM_T_I210:
   5676 	case WM_T_I211:
   5677 	case WM_T_80003:
   5678 		/* check EECD_EE_AUTORD */
   5679 		wm_get_auto_rd_done(sc);
   5680 		break;
   5681 	case WM_T_ICH8:
   5682 	case WM_T_ICH9:
   5683 	case WM_T_ICH10:
   5684 	case WM_T_PCH:
   5685 	case WM_T_PCH2:
   5686 	case WM_T_PCH_LPT:
   5687 	case WM_T_PCH_SPT:
   5688 	case WM_T_PCH_CNP:
   5689 		break;
   5690 	default:
   5691 		panic("%s: unknown type\n", __func__);
   5692 	}
   5693 
   5694 	/* Check whether EEPROM is present or not */
   5695 	switch (sc->sc_type) {
   5696 	case WM_T_82575:
   5697 	case WM_T_82576:
   5698 	case WM_T_82580:
   5699 	case WM_T_I350:
   5700 	case WM_T_I354:
   5701 	case WM_T_ICH8:
   5702 	case WM_T_ICH9:
   5703 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5704 			/* Not found */
   5705 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5706 			if (sc->sc_type == WM_T_82575)
   5707 				wm_reset_init_script_82575(sc);
   5708 		}
   5709 		break;
   5710 	default:
   5711 		break;
   5712 	}
   5713 
   5714 	if (phy_reset != 0)
   5715 		wm_phy_post_reset(sc);
   5716 
   5717 	if ((sc->sc_type == WM_T_82580)
   5718 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5719 		/* Clear global device reset status bit */
   5720 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5721 	}
   5722 
   5723 	/* Clear any pending interrupt events. */
   5724 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5725 	reg = CSR_READ(sc, WMREG_ICR);
   5726 	if (wm_is_using_msix(sc)) {
   5727 		if (sc->sc_type != WM_T_82574) {
   5728 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5729 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5730 		} else
   5731 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5732 	}
   5733 
   5734 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5735 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5736 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
    5737 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
   5738 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5739 		reg |= KABGTXD_BGSQLBIAS;
   5740 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5741 	}
   5742 
   5743 	/* Reload sc_ctrl */
   5744 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5745 
   5746 	wm_set_eee(sc);
   5747 
   5748 	/*
   5749 	 * For PCH, this write will make sure that any noise will be detected
   5750 	 * as a CRC error and be dropped rather than show up as a bad packet
    5751 	 * to the DMA engine.
   5752 	 */
   5753 	if (sc->sc_type == WM_T_PCH)
   5754 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5755 
   5756 	if (sc->sc_type >= WM_T_82544)
   5757 		CSR_WRITE(sc, WMREG_WUC, 0);
   5758 
   5759 	if (sc->sc_type < WM_T_82575)
   5760 		wm_disable_aspm(sc); /* Workaround for some chips */
   5761 
   5762 	wm_reset_mdicnfg_82580(sc);
   5763 
   5764 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5765 		wm_pll_workaround_i210(sc);
   5766 
   5767 	if (sc->sc_type == WM_T_80003) {
   5768 		/* Default to TRUE to enable the MDIC W/A */
   5769 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5770 
   5771 		rv = wm_kmrn_readreg(sc,
   5772 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5773 		if (rv == 0) {
   5774 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5775 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5776 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5777 			else
   5778 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5779 		}
   5780 	}
   5781 }
   5782 
   5783 /*
   5784  * wm_add_rxbuf:
   5785  *
    5786  *	Add a receive buffer to the indicated descriptor.
   5787  */
   5788 static int
   5789 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5790 {
   5791 	struct wm_softc *sc = rxq->rxq_sc;
   5792 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5793 	struct mbuf *m;
   5794 	int error;
   5795 
   5796 	KASSERT(mutex_owned(rxq->rxq_lock));
   5797 
   5798 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5799 	if (m == NULL)
   5800 		return ENOBUFS;
   5801 
   5802 	MCLGET(m, M_DONTWAIT);
   5803 	if ((m->m_flags & M_EXT) == 0) {
   5804 		m_freem(m);
   5805 		return ENOBUFS;
   5806 	}
   5807 
   5808 	if (rxs->rxs_mbuf != NULL)
   5809 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5810 
   5811 	rxs->rxs_mbuf = m;
   5812 
   5813 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5814 	/*
   5815 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5816 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5817 	 */
   5818 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5819 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5820 	if (error) {
   5821 		/* XXX XXX XXX */
   5822 		aprint_error_dev(sc->sc_dev,
   5823 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5824 		panic("wm_add_rxbuf");
   5825 	}
   5826 
   5827 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5828 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5829 
   5830 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5831 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5832 			wm_init_rxdesc(rxq, idx);
   5833 	} else
   5834 		wm_init_rxdesc(rxq, idx);
   5835 
   5836 	return 0;
   5837 }
   5838 
   5839 /*
   5840  * wm_rxdrain:
   5841  *
   5842  *	Drain the receive queue.
   5843  */
   5844 static void
   5845 wm_rxdrain(struct wm_rxqueue *rxq)
   5846 {
   5847 	struct wm_softc *sc = rxq->rxq_sc;
   5848 	struct wm_rxsoft *rxs;
   5849 	int i;
   5850 
   5851 	KASSERT(mutex_owned(rxq->rxq_lock));
   5852 
   5853 	for (i = 0; i < WM_NRXDESC; i++) {
   5854 		rxs = &rxq->rxq_soft[i];
   5855 		if (rxs->rxs_mbuf != NULL) {
   5856 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5857 			m_freem(rxs->rxs_mbuf);
   5858 			rxs->rxs_mbuf = NULL;
   5859 		}
   5860 	}
   5861 }
   5862 
   5863 /*
    5864  * Set up the registers for RSS.
    5865  *
    5866  * XXX VMDq is not yet supported.
   5867  */
   5868 static void
   5869 wm_init_rss(struct wm_softc *sc)
   5870 {
   5871 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5872 	int i;
   5873 
   5874 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5875 
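         	/* Distribute the queues round-robin across the RETA entries. */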
   5876 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5877 		unsigned int qid, reta_ent;
   5878 
   5879 		qid  = i % sc->sc_nqueues;
   5880 		switch (sc->sc_type) {
   5881 		case WM_T_82574:
   5882 			reta_ent = __SHIFTIN(qid,
   5883 			    RETA_ENT_QINDEX_MASK_82574);
   5884 			break;
   5885 		case WM_T_82575:
   5886 			reta_ent = __SHIFTIN(qid,
   5887 			    RETA_ENT_QINDEX1_MASK_82575);
   5888 			break;
   5889 		default:
   5890 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5891 			break;
   5892 		}
   5893 
   5894 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5895 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5896 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5897 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5898 	}
   5899 
   5900 	rss_getkey((uint8_t *)rss_key);
   5901 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5902 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5903 
   5904 	if (sc->sc_type == WM_T_82574)
   5905 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5906 	else
   5907 		mrqc = MRQC_ENABLE_RSS_MQ;
   5908 
   5909 	/*
    5910 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5911 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5912 	 */
   5913 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5914 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5915 #if 0
   5916 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5917 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5918 #endif
   5919 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5920 
   5921 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5922 }
   5923 
   5924 /*
    5925  * Adjust the numbers of TX and RX queues which the system actually uses.
    5926  *
    5927  * The numbers are affected by the parameters below:
    5928  *     - The number of hardware queues
   5929  *     - The number of MSI-X vectors (= "nvectors" argument)
   5930  *     - ncpu
   5931  */
   5932 static void
   5933 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5934 {
   5935 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5936 
   5937 	if (nvectors < 2) {
   5938 		sc->sc_nqueues = 1;
   5939 		return;
   5940 	}
   5941 
   5942 	switch (sc->sc_type) {
   5943 	case WM_T_82572:
   5944 		hw_ntxqueues = 2;
   5945 		hw_nrxqueues = 2;
   5946 		break;
   5947 	case WM_T_82574:
   5948 		hw_ntxqueues = 2;
   5949 		hw_nrxqueues = 2;
   5950 		break;
   5951 	case WM_T_82575:
   5952 		hw_ntxqueues = 4;
   5953 		hw_nrxqueues = 4;
   5954 		break;
   5955 	case WM_T_82576:
   5956 		hw_ntxqueues = 16;
   5957 		hw_nrxqueues = 16;
   5958 		break;
   5959 	case WM_T_82580:
   5960 	case WM_T_I350:
   5961 	case WM_T_I354:
   5962 		hw_ntxqueues = 8;
   5963 		hw_nrxqueues = 8;
   5964 		break;
   5965 	case WM_T_I210:
   5966 		hw_ntxqueues = 4;
   5967 		hw_nrxqueues = 4;
   5968 		break;
   5969 	case WM_T_I211:
   5970 		hw_ntxqueues = 2;
   5971 		hw_nrxqueues = 2;
   5972 		break;
   5973 		/*
    5974 		 * The Ethernet controllers below do not support MSI-X;
    5975 		 * this driver doesn't let them use multiqueue.
   5976 		 *     - WM_T_80003
   5977 		 *     - WM_T_ICH8
   5978 		 *     - WM_T_ICH9
   5979 		 *     - WM_T_ICH10
   5980 		 *     - WM_T_PCH
   5981 		 *     - WM_T_PCH2
   5982 		 *     - WM_T_PCH_LPT
   5983 		 */
   5984 	default:
   5985 		hw_ntxqueues = 1;
   5986 		hw_nrxqueues = 1;
   5987 		break;
   5988 	}
   5989 
   5990 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5991 
   5992 	/*
    5993 	 * Since more queues than MSI-X vectors cannot improve scaling,
    5994 	 * we limit the number of queues actually used.
   5995 	 */
   5996 	if (nvectors < hw_nqueues + 1)
   5997 		sc->sc_nqueues = nvectors - 1;
   5998 	else
   5999 		sc->sc_nqueues = hw_nqueues;
   6000 
   6001 	/*
    6002 	 * Since more queues than CPUs cannot improve scaling,
    6003 	 * we limit the number of queues actually used.
   6004 	 */
   6005 	if (ncpu < sc->sc_nqueues)
   6006 		sc->sc_nqueues = ncpu;
   6007 }
   6008 
   6009 static inline bool
   6010 wm_is_using_msix(struct wm_softc *sc)
   6011 {
   6012 
   6013 	return (sc->sc_nintrs > 1);
   6014 }
   6015 
   6016 static inline bool
   6017 wm_is_using_multiqueue(struct wm_softc *sc)
   6018 {
   6019 
   6020 	return (sc->sc_nqueues > 1);
   6021 }
   6022 
   6023 static int
   6024 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   6025 {
   6026 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   6027 
   6028 	wmq->wmq_id = qidx;
   6029 	wmq->wmq_intr_idx = intr_idx;
   6030 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   6031 	    wm_handle_queue, wmq);
   6032 	if (wmq->wmq_si != NULL)
   6033 		return 0;
   6034 
   6035 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   6036 	    wmq->wmq_id);
   6037 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   6038 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6039 	return ENOMEM;
   6040 }
   6041 
   6042 /*
    6043  * Both single-interrupt MSI and INTx can use this function.
   6044  */
   6045 static int
   6046 wm_setup_legacy(struct wm_softc *sc)
   6047 {
   6048 	pci_chipset_tag_t pc = sc->sc_pc;
   6049 	const char *intrstr = NULL;
   6050 	char intrbuf[PCI_INTRSTR_LEN];
   6051 	int error;
   6052 
   6053 	error = wm_alloc_txrx_queues(sc);
   6054 	if (error) {
   6055 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6056 		    error);
   6057 		return ENOMEM;
   6058 	}
   6059 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   6060 	    sizeof(intrbuf));
   6061 #ifdef WM_MPSAFE
   6062 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   6063 #endif
   6064 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   6065 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   6066 	if (sc->sc_ihs[0] == NULL) {
    6067 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   6068 		    (pci_intr_type(pc, sc->sc_intrs[0])
   6069 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6070 		return ENOMEM;
   6071 	}
   6072 
   6073 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   6074 	sc->sc_nintrs = 1;
   6075 
   6076 	return wm_softint_establish_queue(sc, 0, 0);
   6077 }
   6078 
   6079 static int
   6080 wm_setup_msix(struct wm_softc *sc)
   6081 {
   6082 	void *vih;
   6083 	kcpuset_t *affinity;
   6084 	int qidx, error, intr_idx, txrx_established;
   6085 	pci_chipset_tag_t pc = sc->sc_pc;
   6086 	const char *intrstr = NULL;
   6087 	char intrbuf[PCI_INTRSTR_LEN];
   6088 	char intr_xname[INTRDEVNAMEBUF];
   6089 
   6090 	if (sc->sc_nqueues < ncpu) {
   6091 		/*
    6092 		 * To avoid contending with other devices' interrupts, the
    6093 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   6094 		 */
   6095 		sc->sc_affinity_offset = 1;
   6096 	} else {
   6097 		/*
    6098 		 * In this case the device uses all CPUs, so for readability
    6099 		 * we unify the affinity cpu_index with the MSI-X vector number.
   6100 		 */
   6101 		sc->sc_affinity_offset = 0;
   6102 	}
   6103 
   6104 	error = wm_alloc_txrx_queues(sc);
   6105 	if (error) {
   6106 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   6107 		    error);
   6108 		return ENOMEM;
   6109 	}
   6110 
   6111 	kcpuset_create(&affinity, false);
   6112 	intr_idx = 0;
   6113 
   6114 	/*
   6115 	 * TX and RX
   6116 	 */
   6117 	txrx_established = 0;
   6118 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6119 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6120 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   6121 
   6122 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6123 		    sizeof(intrbuf));
   6124 #ifdef WM_MPSAFE
   6125 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   6126 		    PCI_INTR_MPSAFE, true);
   6127 #endif
   6128 		memset(intr_xname, 0, sizeof(intr_xname));
   6129 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   6130 		    device_xname(sc->sc_dev), qidx);
   6131 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6132 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   6133 		if (vih == NULL) {
   6134 			aprint_error_dev(sc->sc_dev,
   6135 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   6136 			    intrstr ? " at " : "",
   6137 			    intrstr ? intrstr : "");
   6138 
   6139 			goto fail;
   6140 		}
   6141 		kcpuset_zero(affinity);
   6142 		/* Round-robin affinity */
   6143 		kcpuset_set(affinity, affinity_to);
   6144 		error = interrupt_distribute(vih, affinity, NULL);
   6145 		if (error == 0) {
   6146 			aprint_normal_dev(sc->sc_dev,
   6147 			    "for TX and RX interrupting at %s affinity to %u\n",
   6148 			    intrstr, affinity_to);
   6149 		} else {
   6150 			aprint_normal_dev(sc->sc_dev,
   6151 			    "for TX and RX interrupting at %s\n", intrstr);
   6152 		}
   6153 		sc->sc_ihs[intr_idx] = vih;
   6154 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   6155 			goto fail;
   6156 		txrx_established++;
   6157 		intr_idx++;
   6158 	}
   6159 
   6160 	/* LINK */
   6161 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   6162 	    sizeof(intrbuf));
   6163 #ifdef WM_MPSAFE
   6164 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   6165 #endif
   6166 	memset(intr_xname, 0, sizeof(intr_xname));
   6167 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   6168 	    device_xname(sc->sc_dev));
   6169 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   6170 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   6171 	if (vih == NULL) {
   6172 		aprint_error_dev(sc->sc_dev,
   6173 		    "unable to establish MSI-X(for LINK)%s%s\n",
   6174 		    intrstr ? " at " : "",
   6175 		    intrstr ? intrstr : "");
   6176 
   6177 		goto fail;
   6178 	}
   6179 	/* Keep default affinity to LINK interrupt */
   6180 	aprint_normal_dev(sc->sc_dev,
   6181 	    "for LINK interrupting at %s\n", intrstr);
   6182 	sc->sc_ihs[intr_idx] = vih;
   6183 	sc->sc_link_intr_idx = intr_idx;
   6184 
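         	/* One MSI-X vector per Tx/Rx queue pair, plus one for link. */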
   6185 	sc->sc_nintrs = sc->sc_nqueues + 1;
   6186 	kcpuset_destroy(affinity);
   6187 	return 0;
   6188 
   6189  fail:
   6190 	for (qidx = 0; qidx < txrx_established; qidx++) {
   6191 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6192 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   6193 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   6194 	}
   6195 
   6196 	kcpuset_destroy(affinity);
   6197 	return ENOMEM;
   6198 }
   6199 
   6200 static void
   6201 wm_unset_stopping_flags(struct wm_softc *sc)
   6202 {
   6203 	int i;
   6204 
   6205 	KASSERT(WM_CORE_LOCKED(sc));
   6206 
   6207 	/* Must unset stopping flags in ascending order. */
   6208 	for (i = 0; i < sc->sc_nqueues; i++) {
   6209 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6210 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6211 
   6212 		mutex_enter(txq->txq_lock);
   6213 		txq->txq_stopping = false;
   6214 		mutex_exit(txq->txq_lock);
   6215 
   6216 		mutex_enter(rxq->rxq_lock);
   6217 		rxq->rxq_stopping = false;
   6218 		mutex_exit(rxq->rxq_lock);
   6219 	}
   6220 
   6221 	sc->sc_core_stopping = false;
   6222 }
   6223 
   6224 static void
   6225 wm_set_stopping_flags(struct wm_softc *sc)
   6226 {
   6227 	int i;
   6228 
   6229 	KASSERT(WM_CORE_LOCKED(sc));
   6230 
   6231 	sc->sc_core_stopping = true;
   6232 
   6233 	/* Must set stopping flags in ascending order. */
   6234 	for (i = 0; i < sc->sc_nqueues; i++) {
   6235 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6236 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6237 
   6238 		mutex_enter(rxq->rxq_lock);
   6239 		rxq->rxq_stopping = true;
   6240 		mutex_exit(rxq->rxq_lock);
   6241 
   6242 		mutex_enter(txq->txq_lock);
   6243 		txq->txq_stopping = true;
   6244 		mutex_exit(txq->txq_lock);
   6245 	}
   6246 }
   6247 
   6248 /*
   6249  * Write interrupt interval value to ITR or EITR
   6250  */
   6251 static void
   6252 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   6253 {
   6254 
   6255 	if (!wmq->wmq_set_itr)
   6256 		return;
   6257 
   6258 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6259 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   6260 
   6261 		/*
    6262 		 * The 82575 doesn't have the CNT_INGR field,
    6263 		 * so overwrite the counter field in software.
   6264 		 */
   6265 		if (sc->sc_type == WM_T_82575)
   6266 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   6267 		else
   6268 			eitr |= EITR_CNT_INGR;
   6269 
   6270 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   6271 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   6272 		/*
    6273 		 * The 82574 has both ITR and EITR. Set EITR when we use
    6274 		 * the multiqueue function with MSI-X.
   6275 		 */
   6276 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   6277 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   6278 	} else {
   6279 		KASSERT(wmq->wmq_id == 0);
   6280 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   6281 	}
   6282 
   6283 	wmq->wmq_set_itr = false;
   6284 }
   6285 
   6286 /*
   6287  * TODO
    6288  * The dynamic ITR calculation below is almost the same as Linux's igb;
    6289  * however, it does not fit wm(4) well, so AIM stays disabled until we
    6290  * find an appropriate ITR calculation.
   6291  */
   6292 /*
    6293  * Calculate the interrupt interval value that wm_itrs_writereg() will
    6294  * write to the register; this function itself does not write ITR/EITR.
   6295  */
   6296 static void
   6297 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   6298 {
   6299 #ifdef NOTYET
   6300 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6301 	struct wm_txqueue *txq = &wmq->wmq_txq;
   6302 	uint32_t avg_size = 0;
   6303 	uint32_t new_itr;
   6304 
   6305 	if (rxq->rxq_packets)
   6306 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   6307 	if (txq->txq_packets)
   6308 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   6309 
   6310 	if (avg_size == 0) {
   6311 		new_itr = 450; /* restore default value */
   6312 		goto out;
   6313 	}
   6314 
   6315 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   6316 	avg_size += 24;
   6317 
   6318 	/* Don't starve jumbo frames */
   6319 	avg_size = uimin(avg_size, 3000);
   6320 
   6321 	/* Give a little boost to mid-size frames */
   6322 	if ((avg_size > 300) && (avg_size < 1200))
   6323 		new_itr = avg_size / 3;
   6324 	else
   6325 		new_itr = avg_size / 2;
   6326 
   6327 out:
   6328 	/*
    6329 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   6330 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   6331 	 */
   6332 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   6333 		new_itr *= 4;
   6334 
   6335 	if (new_itr != wmq->wmq_itr) {
   6336 		wmq->wmq_itr = new_itr;
   6337 		wmq->wmq_set_itr = true;
   6338 	} else
   6339 		wmq->wmq_set_itr = false;
   6340 
   6341 	rxq->rxq_packets = 0;
   6342 	rxq->rxq_bytes = 0;
   6343 	txq->txq_packets = 0;
   6344 	txq->txq_bytes = 0;
   6345 #endif
   6346 }
   6347 
   6348 static void
   6349 wm_init_sysctls(struct wm_softc *sc)
   6350 {
   6351 	struct sysctllog **log;
   6352 	const struct sysctlnode *rnode, *qnode, *cnode;
   6353 	int i, rv;
   6354 	const char *dvname;
   6355 
   6356 	log = &sc->sc_sysctllog;
   6357 	dvname = device_xname(sc->sc_dev);
   6358 
   6359 	rv = sysctl_createv(log, 0, NULL, &rnode,
   6360 	    0, CTLTYPE_NODE, dvname,
   6361 	    SYSCTL_DESCR("wm information and settings"),
   6362 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   6363 	if (rv != 0)
   6364 		goto err;
   6365 
   6366 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
    6367 	    CTLTYPE_BOOL, "txrx_workqueue",
         	    SYSCTL_DESCR("Use workqueue for packet processing"),
   6368 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   6369 	if (rv != 0)
   6370 		goto teardown;
   6371 
   6372 	for (i = 0; i < sc->sc_nqueues; i++) {
   6373 		struct wm_queue *wmq = &sc->sc_queue[i];
   6374 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6375 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6376 
   6377 		snprintf(sc->sc_queue[i].sysctlname,
   6378 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6379 
   6380 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6381 		    0, CTLTYPE_NODE,
   6382 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6383 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6384 			break;
   6385 
   6386 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6387 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6388 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6389 		    NULL, 0, &txq->txq_free,
   6390 		    0, CTL_CREATE, CTL_EOL) != 0)
   6391 			break;
   6392 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6393 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6394 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6395 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6396 		    0, CTL_CREATE, CTL_EOL) != 0)
   6397 			break;
   6398 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6399 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6400 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6401 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6402 		    0, CTL_CREATE, CTL_EOL) != 0)
   6403 			break;
   6404 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6405 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6406 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6407 		    NULL, 0, &txq->txq_next,
   6408 		    0, CTL_CREATE, CTL_EOL) != 0)
   6409 			break;
   6410 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6411 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6412 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6413 		    NULL, 0, &txq->txq_sfree,
   6414 		    0, CTL_CREATE, CTL_EOL) != 0)
   6415 			break;
   6416 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6417 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6418 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6419 		    NULL, 0, &txq->txq_snext,
   6420 		    0, CTL_CREATE, CTL_EOL) != 0)
   6421 			break;
   6422 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6423 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6424 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6425 		    NULL, 0, &txq->txq_sdirty,
   6426 		    0, CTL_CREATE, CTL_EOL) != 0)
   6427 			break;
   6428 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6429 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6430 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6431 		    NULL, 0, &txq->txq_flags,
   6432 		    0, CTL_CREATE, CTL_EOL) != 0)
   6433 			break;
   6434 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6435 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6436 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6437 		    NULL, 0, &txq->txq_stopping,
   6438 		    0, CTL_CREATE, CTL_EOL) != 0)
   6439 			break;
   6440 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6441 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6442 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6443 		    NULL, 0, &txq->txq_sending,
   6444 		    0, CTL_CREATE, CTL_EOL) != 0)
   6445 			break;
   6446 
   6447 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6448 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6449 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6450 		    NULL, 0, &rxq->rxq_ptr,
   6451 		    0, CTL_CREATE, CTL_EOL) != 0)
   6452 			break;
   6453 	}
   6454 
   6455 #ifdef WM_DEBUG
   6456 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6457 	    CTLTYPE_INT, "debug_flags",
   6458 	    SYSCTL_DESCR(
   6459 		    "Debug flags:\n"	\
   6460 		    "\t0x01 LINK\n"	\
   6461 		    "\t0x02 TX\n"	\
   6462 		    "\t0x04 RX\n"	\
   6463 		    "\t0x08 GMII\n"	\
   6464 		    "\t0x10 MANAGE\n"	\
   6465 		    "\t0x20 NVM\n"	\
   6466 		    "\t0x40 INIT\n"	\
   6467 		    "\t0x80 LOCK"),
   6468 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6469 	if (rv != 0)
   6470 		goto teardown;
   6471 #endif
   6472 
   6473 	return;
   6474 
   6475 teardown:
   6476 	sysctl_teardown(log);
   6477 err:
   6478 	sc->sc_sysctllog = NULL;
   6479 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6480 	    __func__, rv);
   6481 }
   6482 
   6483 /*
   6484  * wm_init:		[ifnet interface function]
   6485  *
   6486  *	Initialize the interface.
   6487  */
   6488 static int
   6489 wm_init(struct ifnet *ifp)
   6490 {
   6491 	struct wm_softc *sc = ifp->if_softc;
   6492 	int ret;
   6493 
   6494 	KASSERT(IFNET_LOCKED(ifp));
   6495 
   6496 	if (sc->sc_dying)
   6497 		return ENXIO;
   6498 
   6499 	WM_CORE_LOCK(sc);
   6500 	ret = wm_init_locked(ifp);
   6501 	WM_CORE_UNLOCK(sc);
   6502 
   6503 	return ret;
   6504 }
   6505 
   6506 static int
   6507 wm_init_locked(struct ifnet *ifp)
   6508 {
   6509 	struct wm_softc *sc = ifp->if_softc;
   6510 	struct ethercom *ec = &sc->sc_ethercom;
   6511 	int i, j, trynum, error = 0;
   6512 	uint32_t reg, sfp_mask = 0;
   6513 
   6514 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6515 		device_xname(sc->sc_dev), __func__));
   6516 	KASSERT(IFNET_LOCKED(ifp));
   6517 	KASSERT(WM_CORE_LOCKED(sc));
   6518 
   6519 	/*
    6520 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6521 	 * There is a small but measurable benefit to avoiding the adjustment
   6522 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6523 	 * on such platforms.  One possibility is that the DMA itself is
   6524 	 * slightly more efficient if the front of the entire packet (instead
   6525 	 * of the front of the headers) is aligned.
   6526 	 *
   6527 	 * Note we must always set align_tweak to 0 if we are using
   6528 	 * jumbo frames.
   6529 	 */
   6530 #ifdef __NO_STRICT_ALIGNMENT
   6531 	sc->sc_align_tweak = 0;
   6532 #else
   6533 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6534 		sc->sc_align_tweak = 0;
   6535 	else
   6536 		sc->sc_align_tweak = 2;
   6537 #endif /* __NO_STRICT_ALIGNMENT */
   6538 
   6539 	/* Cancel any pending I/O. */
   6540 	wm_stop_locked(ifp, false, false);
   6541 
   6542 	/* Update statistics before reset */
   6543 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6544 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6545 
   6546 	/* >= PCH_SPT hardware workaround before reset. */
   6547 	if (sc->sc_type >= WM_T_PCH_SPT)
   6548 		wm_flush_desc_rings(sc);
   6549 
   6550 	/* Reset the chip to a known state. */
   6551 	wm_reset(sc);
   6552 
   6553 	/*
    6554 	 * AMT-based hardware can now take control from firmware.
    6555 	 * Do this after reset.
   6556 	 */
   6557 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6558 		wm_get_hw_control(sc);
   6559 
   6560 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6561 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6562 		wm_legacy_irq_quirk_spt(sc);
   6563 
   6564 	/* Init hardware bits */
   6565 	wm_initialize_hardware_bits(sc);
   6566 
   6567 	/* Reset the PHY. */
   6568 	if (sc->sc_flags & WM_F_HAS_MII)
   6569 		wm_gmii_reset(sc);
   6570 
   6571 	if (sc->sc_type >= WM_T_ICH8) {
   6572 		reg = CSR_READ(sc, WMREG_GCR);
   6573 		/*
   6574 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6575 		 * default after reset.
   6576 		 */
   6577 		if (sc->sc_type == WM_T_ICH8)
   6578 			reg |= GCR_NO_SNOOP_ALL;
   6579 		else
   6580 			reg &= ~GCR_NO_SNOOP_ALL;
   6581 		CSR_WRITE(sc, WMREG_GCR, reg);
   6582 	}
   6583 
   6584 	if ((sc->sc_type >= WM_T_ICH8)
   6585 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6586 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6587 
   6588 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6589 		reg |= CTRL_EXT_RO_DIS;
   6590 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6591 	}
   6592 
   6593 	/* Calculate (E)ITR value */
   6594 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6595 		/*
   6596 		 * For NEWQUEUE's EITR (except for 82575).
    6597 		 * The 82575's EITR should be set to the same throttling value
    6598 		 * as the other old controllers' ITR because the interrupts/sec
    6599 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    6600 		 *
    6601 		 * The 82574's EITR should be set to the same throttling value
    6602 		 * as ITR.
    6603 		 *
    6604 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
         		 * contrast to the ITR throttling value.
   6605 		 */
   6606 		sc->sc_itr_init = 450;
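         		/*
         		 * A worked example of the formula above: 450 here
         		 * corresponds to roughly 1,000,000 / 450 ~= 2222
         		 * interrupts/sec.
         		 */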
   6607 	} else if (sc->sc_type >= WM_T_82543) {
   6608 		/*
   6609 		 * Set up the interrupt throttling register (units of 256ns)
   6610 		 * Note that a footnote in Intel's documentation says this
   6611 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6612 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6613 		 * that that is also true for the 1024ns units of the other
   6614 		 * interrupt-related timer registers -- so, really, we ought
   6615 		 * to divide this value by 4 when the link speed is low.
   6616 		 *
   6617 		 * XXX implement this division at link speed change!
   6618 		 */
   6619 
   6620 		/*
   6621 		 * For N interrupts/sec, set this value to:
   6622 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6623 		 * absolute and packet timer values to this value
   6624 		 * divided by 4 to get "simple timer" behavior.
   6625 		 */
   6626 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6627 	}
   6628 
   6629 	error = wm_init_txrx_queues(sc);
   6630 	if (error)
   6631 		goto out;
   6632 
   6633 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6634 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6635 	    (sc->sc_type >= WM_T_82575))
   6636 		wm_serdes_power_up_link_82575(sc);
   6637 
   6638 	/* Clear out the VLAN table -- we don't use it (yet). */
   6639 	CSR_WRITE(sc, WMREG_VET, 0);
   6640 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6641 		trynum = 10; /* Due to hw errata */
   6642 	else
   6643 		trynum = 1;
   6644 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6645 		for (j = 0; j < trynum; j++)
   6646 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6647 
   6648 	/*
   6649 	 * Set up flow-control parameters.
   6650 	 *
   6651 	 * XXX Values could probably stand some tuning.
   6652 	 */
   6653 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6654 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6655 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6656 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6657 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6658 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6659 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6660 	}
   6661 
   6662 	sc->sc_fcrtl = FCRTL_DFLT;
   6663 	if (sc->sc_type < WM_T_82543) {
   6664 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6665 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6666 	} else {
   6667 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6668 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6669 	}
   6670 
   6671 	if (sc->sc_type == WM_T_80003)
   6672 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6673 	else
   6674 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6675 
   6676 	/* Writes the control register. */
   6677 	wm_set_vlan(sc);
   6678 
   6679 	if (sc->sc_flags & WM_F_HAS_MII) {
   6680 		uint16_t kmreg;
   6681 
   6682 		switch (sc->sc_type) {
   6683 		case WM_T_80003:
   6684 		case WM_T_ICH8:
   6685 		case WM_T_ICH9:
   6686 		case WM_T_ICH10:
   6687 		case WM_T_PCH:
   6688 		case WM_T_PCH2:
   6689 		case WM_T_PCH_LPT:
   6690 		case WM_T_PCH_SPT:
   6691 		case WM_T_PCH_CNP:
   6692 			/*
   6693 			 * Set the mac to wait the maximum time between each
   6694 			 * iteration and increase the max iterations when
   6695 			 * polling the phy; this fixes erroneous timeouts at
   6696 			 * 10Mbps.
   6697 			 */
   6698 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6699 			    0xFFFF);
   6700 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6701 			    &kmreg);
   6702 			kmreg |= 0x3F;
   6703 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6704 			    kmreg);
   6705 			break;
   6706 		default:
   6707 			break;
   6708 		}
   6709 
   6710 		if (sc->sc_type == WM_T_80003) {
   6711 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6712 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6713 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6714 
   6715 			/* Bypass RX and TX FIFOs */
   6716 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6717 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6718 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6719 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6720 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6721 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6722 		}
   6723 	}
   6724 #if 0
   6725 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6726 #endif
   6727 
   6728 	/* Set up checksum offload parameters. */
   6729 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6730 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6731 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6732 		reg |= RXCSUM_IPOFL;
   6733 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6734 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6735 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6736 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6737 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6738 
    6739 	/* Set up the MSI-X related registers. */
   6740 	if (wm_is_using_msix(sc)) {
   6741 		uint32_t ivar, qintr_idx;
   6742 		struct wm_queue *wmq;
   6743 		unsigned int qid;
   6744 
   6745 		if (sc->sc_type == WM_T_82575) {
   6746 			/* Interrupt control */
   6747 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6748 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6749 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6750 
   6751 			/* TX and RX */
   6752 			for (i = 0; i < sc->sc_nqueues; i++) {
   6753 				wmq = &sc->sc_queue[i];
   6754 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6755 				    EITR_TX_QUEUE(wmq->wmq_id)
   6756 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6757 			}
   6758 			/* Link status */
   6759 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6760 			    EITR_OTHER);
   6761 		} else if (sc->sc_type == WM_T_82574) {
   6762 			/* Interrupt control */
   6763 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6764 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6765 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6766 
   6767 			/*
   6768 			 * Work around issue with spurious interrupts
   6769 			 * in MSI-X mode.
    6770 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6771 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   6772 			 */
   6773 			reg = CSR_READ(sc, WMREG_RFCTL);
   6774 			reg |= WMREG_RFCTL_ACKDIS;
   6775 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6776 
   6777 			ivar = 0;
   6778 			/* TX and RX */
   6779 			for (i = 0; i < sc->sc_nqueues; i++) {
   6780 				wmq = &sc->sc_queue[i];
   6781 				qid = wmq->wmq_id;
   6782 				qintr_idx = wmq->wmq_intr_idx;
   6783 
   6784 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6785 				    IVAR_TX_MASK_Q_82574(qid));
   6786 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6787 				    IVAR_RX_MASK_Q_82574(qid));
   6788 			}
   6789 			/* Link status */
   6790 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6791 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6792 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6793 		} else {
   6794 			/* Interrupt control */
   6795 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6796 			    | GPIE_EIAME | GPIE_PBA);
   6797 
   6798 			switch (sc->sc_type) {
   6799 			case WM_T_82580:
   6800 			case WM_T_I350:
   6801 			case WM_T_I354:
   6802 			case WM_T_I210:
   6803 			case WM_T_I211:
   6804 				/* TX and RX */
   6805 				for (i = 0; i < sc->sc_nqueues; i++) {
   6806 					wmq = &sc->sc_queue[i];
   6807 					qid = wmq->wmq_id;
   6808 					qintr_idx = wmq->wmq_intr_idx;
   6809 
   6810 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6811 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6812 					ivar |= __SHIFTIN((qintr_idx
   6813 						| IVAR_VALID),
   6814 					    IVAR_TX_MASK_Q(qid));
   6815 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6816 					ivar |= __SHIFTIN((qintr_idx
   6817 						| IVAR_VALID),
   6818 					    IVAR_RX_MASK_Q(qid));
   6819 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6820 				}
   6821 				break;
   6822 			case WM_T_82576:
   6823 				/* TX and RX */
   6824 				for (i = 0; i < sc->sc_nqueues; i++) {
   6825 					wmq = &sc->sc_queue[i];
   6826 					qid = wmq->wmq_id;
   6827 					qintr_idx = wmq->wmq_intr_idx;
   6828 
   6829 					ivar = CSR_READ(sc,
   6830 					    WMREG_IVAR_Q_82576(qid));
   6831 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6832 					ivar |= __SHIFTIN((qintr_idx
   6833 						| IVAR_VALID),
   6834 					    IVAR_TX_MASK_Q_82576(qid));
   6835 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6836 					ivar |= __SHIFTIN((qintr_idx
   6837 						| IVAR_VALID),
   6838 					    IVAR_RX_MASK_Q_82576(qid));
   6839 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6840 					    ivar);
   6841 				}
   6842 				break;
   6843 			default:
   6844 				break;
   6845 			}
   6846 
   6847 			/* Link status */
   6848 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6849 			    IVAR_MISC_OTHER);
   6850 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6851 		}
   6852 
   6853 		if (wm_is_using_multiqueue(sc)) {
   6854 			wm_init_rss(sc);
   6855 
    6856 			/*
    6857 			 * NOTE: Receive Full-Packet Checksum Offload is
    6858 			 * mutually exclusive with Multiqueue. However, this
    6859 			 * is not the same as the TCP/IP checksums, which
    6860 			 * still work.
    6861 			 */
   6862 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6863 			reg |= RXCSUM_PCSD;
   6864 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6865 		}
   6866 	}
   6867 
   6868 	/* Set up the interrupt registers. */
   6869 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6870 
   6871 	/* Enable SFP module insertion interrupt if it's required */
   6872 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6873 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6874 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6875 		sfp_mask = ICR_GPI(0);
   6876 	}
   6877 
   6878 	if (wm_is_using_msix(sc)) {
   6879 		uint32_t mask;
   6880 		struct wm_queue *wmq;
   6881 
   6882 		switch (sc->sc_type) {
   6883 		case WM_T_82574:
   6884 			mask = 0;
   6885 			for (i = 0; i < sc->sc_nqueues; i++) {
   6886 				wmq = &sc->sc_queue[i];
   6887 				mask |= ICR_TXQ(wmq->wmq_id);
   6888 				mask |= ICR_RXQ(wmq->wmq_id);
   6889 			}
   6890 			mask |= ICR_OTHER;
   6891 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6892 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6893 			break;
   6894 		default:
   6895 			if (sc->sc_type == WM_T_82575) {
   6896 				mask = 0;
   6897 				for (i = 0; i < sc->sc_nqueues; i++) {
   6898 					wmq = &sc->sc_queue[i];
   6899 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6900 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6901 				}
   6902 				mask |= EITR_OTHER;
   6903 			} else {
   6904 				mask = 0;
   6905 				for (i = 0; i < sc->sc_nqueues; i++) {
   6906 					wmq = &sc->sc_queue[i];
   6907 					mask |= 1 << wmq->wmq_intr_idx;
   6908 				}
   6909 				mask |= 1 << sc->sc_link_intr_idx;
   6910 			}
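         			/*
         			 * EIAC: auto-clear, EIAM: auto-mask, EIMS:
         			 * unmask the vectors collected above.
         			 */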
   6911 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6912 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6913 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6914 
   6915 			/* For other interrupts */
   6916 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6917 			break;
   6918 		}
   6919 	} else {
   6920 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6921 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6922 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6923 	}
   6924 
   6925 	/* Set up the inter-packet gap. */
   6926 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6927 
   6928 	if (sc->sc_type >= WM_T_82543) {
   6929 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6930 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6931 			wm_itrs_writereg(sc, wmq);
   6932 		}
   6933 		/*
    6934 		 * Link interrupts occur much less frequently than Tx
    6935 		 * and Rx interrupts, so we don't tune the
    6936 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6937 		 * if_igb does.
   6938 		 */
   6939 	}
   6940 
   6941 	/* Set the VLAN EtherType. */
   6942 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6943 
   6944 	/*
   6945 	 * Set up the transmit control register; we start out with
   6946 	 * a collision distance suitable for FDX, but update it when
   6947 	 * we resolve the media type.
   6948 	 */
   6949 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6950 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6951 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6952 	if (sc->sc_type >= WM_T_82571)
   6953 		sc->sc_tctl |= TCTL_MULR;
   6954 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6955 
   6956 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6957 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6958 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6959 	}
   6960 
   6961 	if (sc->sc_type == WM_T_80003) {
   6962 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6963 		reg &= ~TCTL_EXT_GCEX_MASK;
   6964 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6965 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6966 	}
   6967 
   6968 	/* Set the media. */
   6969 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6970 		goto out;
   6971 
   6972 	/* Configure for OS presence */
   6973 	wm_init_manageability(sc);
   6974 
   6975 	/*
   6976 	 * Set up the receive control register; we actually program the
   6977 	 * register when we set the receive filter. Use multicast address
   6978 	 * offset type 0.
   6979 	 *
   6980 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6981 	 * don't enable that feature.
   6982 	 */
   6983 	sc->sc_mchash_type = 0;
   6984 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6985 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6986 
    6987 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
   6988 	if (sc->sc_type == WM_T_82574)
   6989 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6990 
   6991 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6992 		sc->sc_rctl |= RCTL_SECRC;
   6993 
   6994 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6995 	    && (ifp->if_mtu > ETHERMTU)) {
   6996 		sc->sc_rctl |= RCTL_LPE;
   6997 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6998 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6999 	}
   7000 
   7001 	if (MCLBYTES == 2048)
   7002 		sc->sc_rctl |= RCTL_2k;
   7003 	else {
   7004 		if (sc->sc_type >= WM_T_82543) {
   7005 			switch (MCLBYTES) {
   7006 			case 4096:
   7007 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   7008 				break;
   7009 			case 8192:
   7010 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   7011 				break;
   7012 			case 16384:
   7013 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   7014 				break;
   7015 			default:
   7016 				panic("wm_init: MCLBYTES %d unsupported",
   7017 				    MCLBYTES);
   7018 				break;
   7019 			}
   7020 		} else
   7021 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   7022 	}
   7023 
   7024 	/* Enable ECC */
   7025 	switch (sc->sc_type) {
   7026 	case WM_T_82571:
   7027 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   7028 		reg |= PBA_ECC_CORR_EN;
   7029 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   7030 		break;
   7031 	case WM_T_PCH_LPT:
   7032 	case WM_T_PCH_SPT:
   7033 	case WM_T_PCH_CNP:
   7034 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   7035 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   7036 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   7037 
   7038 		sc->sc_ctrl |= CTRL_MEHE;
   7039 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7040 		break;
   7041 	default:
   7042 		break;
   7043 	}
   7044 
   7045 	/*
   7046 	 * Set the receive filter.
   7047 	 *
   7048 	 * For 82575 and 82576, the RX descriptors must be initialized after
   7049 	 * the setting of RCTL.EN in wm_set_filter()
   7050 	 */
   7051 	wm_set_filter(sc);
   7052 
    7053 	/* On 82575 and later, set RDT only if RX is enabled. */
   7054 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7055 		int qidx;
   7056 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7057 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   7058 			for (i = 0; i < WM_NRXDESC; i++) {
   7059 				mutex_enter(rxq->rxq_lock);
   7060 				wm_init_rxdesc(rxq, i);
   7061 				mutex_exit(rxq->rxq_lock);
   7062 
   7063 			}
   7064 		}
   7065 	}
   7066 
   7067 	wm_unset_stopping_flags(sc);
   7068 
   7069 	/* Start the one second link check clock. */
   7070 	callout_schedule(&sc->sc_tick_ch, hz);
   7071 
   7072 	/*
   7073 	 * ...all done! (IFNET_LOCKED asserted above.)
   7074 	 */
   7075 	ifp->if_flags |= IFF_RUNNING;
   7076 
   7077  out:
   7078 	/* Save last flags for the callback */
   7079 	sc->sc_if_flags = ifp->if_flags;
   7080 	sc->sc_ec_capenable = ec->ec_capenable;
   7081 	if (error)
   7082 		log(LOG_ERR, "%s: interface not running\n",
   7083 		    device_xname(sc->sc_dev));
   7084 	return error;
   7085 }
   7086 
   7087 /*
   7088  * wm_stop:		[ifnet interface function]
   7089  *
   7090  *	Stop transmission on the interface.
   7091  */
   7092 static void
   7093 wm_stop(struct ifnet *ifp, int disable)
   7094 {
   7095 	struct wm_softc *sc = ifp->if_softc;
   7096 
   7097 	ASSERT_SLEEPABLE();
   7098 	KASSERT(IFNET_LOCKED(ifp));
   7099 
   7100 	WM_CORE_LOCK(sc);
   7101 	wm_stop_locked(ifp, disable ? true : false, true);
   7102 	WM_CORE_UNLOCK(sc);
   7103 
   7104 	/*
   7105 	 * After wm_set_stopping_flags(), it is guaranteed that
   7106 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    7107 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
    7108 	 * because it can sleep, so call workqueue_wait() here instead.
   7110 	 */
   7111 	for (int i = 0; i < sc->sc_nqueues; i++)
   7112 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   7113 }
   7114 
   7115 static void
   7116 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   7117 {
   7118 	struct wm_softc *sc = ifp->if_softc;
   7119 	struct wm_txsoft *txs;
   7120 	int i, qidx;
   7121 
   7122 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7123 		device_xname(sc->sc_dev), __func__));
   7124 	KASSERT(IFNET_LOCKED(ifp));
   7125 	KASSERT(WM_CORE_LOCKED(sc));
   7126 
   7127 	wm_set_stopping_flags(sc);
   7128 
   7129 	if (sc->sc_flags & WM_F_HAS_MII) {
   7130 		/* Down the MII. */
   7131 		mii_down(&sc->sc_mii);
   7132 	} else {
   7133 #if 0
   7134 		/* Should we clear PHY's status properly? */
   7135 		wm_reset(sc);
   7136 #endif
   7137 	}
   7138 
   7139 	/* Stop the transmit and receive processes. */
   7140 	CSR_WRITE(sc, WMREG_TCTL, 0);
   7141 	CSR_WRITE(sc, WMREG_RCTL, 0);
   7142 	sc->sc_rctl &= ~RCTL_EN;
   7143 
   7144 	/*
   7145 	 * Clear the interrupt mask to ensure the device cannot assert its
   7146 	 * interrupt line.
   7147 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   7148 	 * service any currently pending or shared interrupt.
   7149 	 */
   7150 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   7151 	sc->sc_icr = 0;
   7152 	if (wm_is_using_msix(sc)) {
   7153 		if (sc->sc_type != WM_T_82574) {
   7154 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   7155 			CSR_WRITE(sc, WMREG_EIAC, 0);
   7156 		} else
   7157 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   7158 	}
   7159 
   7160 	/*
   7161 	 * Stop callouts after interrupts are disabled; if we have
   7162 	 * to wait for them, we will be releasing the CORE_LOCK
   7163 	 * briefly, which will unblock interrupts on the current CPU.
   7164 	 */
   7165 
   7166 	/* Stop the one second clock. */
   7167 	if (wait)
   7168 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   7169 	else
   7170 		callout_stop(&sc->sc_tick_ch);
   7171 
   7172 	/* Stop the 82547 Tx FIFO stall check timer. */
   7173 	if (sc->sc_type == WM_T_82547) {
   7174 		if (wait)
   7175 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   7176 		else
   7177 			callout_stop(&sc->sc_txfifo_ch);
   7178 	}
   7179 
   7180 	/* Release any queued transmit buffers. */
   7181 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   7182 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   7183 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7184 		struct mbuf *m;
   7185 
   7186 		mutex_enter(txq->txq_lock);
   7187 		txq->txq_sending = false; /* Ensure watchdog disabled */
   7188 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7189 			txs = &txq->txq_soft[i];
   7190 			if (txs->txs_mbuf != NULL) {
   7191 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   7192 				m_freem(txs->txs_mbuf);
   7193 				txs->txs_mbuf = NULL;
   7194 			}
   7195 		}
   7196 		/* Drain txq_interq */
   7197 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7198 			m_freem(m);
   7199 		mutex_exit(txq->txq_lock);
   7200 	}
   7201 
   7202 	/* Mark the interface as down and cancel the watchdog timer. */
   7203 	ifp->if_flags &= ~IFF_RUNNING;
   7204 	sc->sc_if_flags = ifp->if_flags;
   7205 
   7206 	if (disable) {
   7207 		for (i = 0; i < sc->sc_nqueues; i++) {
   7208 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7209 			mutex_enter(rxq->rxq_lock);
   7210 			wm_rxdrain(rxq);
   7211 			mutex_exit(rxq->rxq_lock);
   7212 		}
   7213 	}
   7214 
   7215 #if 0 /* notyet */
   7216 	if (sc->sc_type >= WM_T_82544)
   7217 		CSR_WRITE(sc, WMREG_WUC, 0);
   7218 #endif
   7219 }
   7220 
   7221 static void
   7222 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   7223 {
   7224 	struct mbuf *m;
   7225 	int i;
   7226 
   7227 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   7228 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   7229 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   7230 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   7231 		    m->m_data, m->m_len, m->m_flags);
   7232 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   7233 	    i, i == 1 ? "" : "s");
   7234 }
   7235 
   7236 /*
   7237  * wm_82547_txfifo_stall:
   7238  *
   7239  *	Callout used to wait for the 82547 Tx FIFO to drain,
   7240  *	reset the FIFO pointers, and restart packet transmission.
   7241  */
   7242 static void
   7243 wm_82547_txfifo_stall(void *arg)
   7244 {
   7245 	struct wm_softc *sc = arg;
   7246 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7247 
   7248 	mutex_enter(txq->txq_lock);
   7249 
   7250 	if (txq->txq_stopping)
   7251 		goto out;
   7252 
   7253 	if (txq->txq_fifo_stall) {
   7254 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   7255 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   7256 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   7257 			/*
   7258 			 * Packets have drained.  Stop transmitter, reset
   7259 			 * FIFO pointers, restart transmitter, and kick
   7260 			 * the packet queue.
   7261 			 */
   7262 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   7263 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   7264 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   7265 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   7266 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   7267 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   7268 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   7269 			CSR_WRITE_FLUSH(sc);
   7270 
   7271 			txq->txq_fifo_head = 0;
   7272 			txq->txq_fifo_stall = 0;
   7273 			wm_start_locked(&sc->sc_ethercom.ec_if);
   7274 		} else {
   7275 			/*
   7276 			 * Still waiting for packets to drain; try again in
   7277 			 * another tick.
   7278 			 */
   7279 			callout_schedule(&sc->sc_txfifo_ch, 1);
   7280 		}
   7281 	}
   7282 
   7283 out:
   7284 	mutex_exit(txq->txq_lock);
   7285 }
   7286 
   7287 /*
   7288  * wm_82547_txfifo_bugchk:
   7289  *
    7290  *	Check for a bug condition in the 82547 Tx FIFO.  We need to
    7291  *	prevent enqueueing a packet that would wrap around the end
    7292  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   7293  *
   7294  *	We do this by checking the amount of space before the end
   7295  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   7296  *	the Tx FIFO, wait for all remaining packets to drain, reset
   7297  *	the internal FIFO pointers to the beginning, and restart
   7298  *	transmission on the interface.
   7299  */
   7300 #define	WM_FIFO_HDR		0x10
   7301 #define	WM_82547_PAD_LEN	0x3e0
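         /*
          * A worked example (hypothetical packet): a 1514-byte frame is charged
          * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO space
          * by the length calculation in wm_82547_txfifo_bugchk() below.
          */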
   7302 static int
   7303 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   7304 {
   7305 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7306 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   7307 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   7308 
   7309 	/* Just return if already stalled. */
   7310 	if (txq->txq_fifo_stall)
   7311 		return 1;
   7312 
   7313 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   7314 		/* Stall only occurs in half-duplex mode. */
   7315 		goto send_packet;
   7316 	}
   7317 
   7318 	if (len >= WM_82547_PAD_LEN + space) {
   7319 		txq->txq_fifo_stall = 1;
   7320 		callout_schedule(&sc->sc_txfifo_ch, 1);
   7321 		return 1;
   7322 	}
   7323 
   7324  send_packet:
   7325 	txq->txq_fifo_head += len;
   7326 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   7327 		txq->txq_fifo_head -= txq->txq_fifo_size;
   7328 
   7329 	return 0;
   7330 }
   7331 
   7332 static int
   7333 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7334 {
   7335 	int error;
   7336 
   7337 	/*
   7338 	 * Allocate the control data structures, and create and load the
   7339 	 * DMA map for it.
   7340 	 *
   7341 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7342 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7343 	 * both sets within the same 4G segment.
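         	 *
         	 * The 0x100000000 "boundary" argument passed to bus_dmamem_alloc()
         	 * below is what enforces this: the allocation may not cross a
         	 * 4GB boundary.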
   7344 	 */
   7345 	if (sc->sc_type < WM_T_82544)
   7346 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   7347 	else
   7348 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   7349 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7350 		txq->txq_descsize = sizeof(nq_txdesc_t);
   7351 	else
   7352 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   7353 
   7354 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   7355 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   7356 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   7357 		aprint_error_dev(sc->sc_dev,
   7358 		    "unable to allocate TX control data, error = %d\n",
   7359 		    error);
   7360 		goto fail_0;
   7361 	}
   7362 
   7363 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   7364 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   7365 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7366 		aprint_error_dev(sc->sc_dev,
   7367 		    "unable to map TX control data, error = %d\n", error);
   7368 		goto fail_1;
   7369 	}
   7370 
   7371 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   7372 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   7373 		aprint_error_dev(sc->sc_dev,
   7374 		    "unable to create TX control data DMA map, error = %d\n",
   7375 		    error);
   7376 		goto fail_2;
   7377 	}
   7378 
   7379 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   7380 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   7381 		aprint_error_dev(sc->sc_dev,
   7382 		    "unable to load TX control data DMA map, error = %d\n",
   7383 		    error);
   7384 		goto fail_3;
   7385 	}
   7386 
   7387 	return 0;
   7388 
   7389  fail_3:
   7390 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7391  fail_2:
   7392 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7393 	    WM_TXDESCS_SIZE(txq));
   7394  fail_1:
   7395 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7396  fail_0:
   7397 	return error;
   7398 }
   7399 
   7400 static void
   7401 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7402 {
   7403 
   7404 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7405 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7406 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7407 	    WM_TXDESCS_SIZE(txq));
   7408 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7409 }
   7410 
   7411 static int
   7412 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7413 {
   7414 	int error;
   7415 	size_t rxq_descs_size;
   7416 
   7417 	/*
   7418 	 * Allocate the control data structures, and create and load the
   7419 	 * DMA map for it.
   7420 	 *
   7421 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7422 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7423 	 * both sets within the same 4G segment.
   7424 	 */
   7425 	rxq->rxq_ndesc = WM_NRXDESC;
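         	/*
         	 * Pick the Rx descriptor format: the 82574 uses extended
         	 * descriptors, NEWQUEUE chips use the "nq" format, and
         	 * everything else uses the legacy wiseman format.
         	 */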
   7426 	if (sc->sc_type == WM_T_82574)
   7427 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7428 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7429 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7430 	else
   7431 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7432 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7433 
   7434 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7435 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7436 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7437 		aprint_error_dev(sc->sc_dev,
   7438 		    "unable to allocate RX control data, error = %d\n",
   7439 		    error);
   7440 		goto fail_0;
   7441 	}
   7442 
   7443 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7444 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7445 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7446 		aprint_error_dev(sc->sc_dev,
   7447 		    "unable to map RX control data, error = %d\n", error);
   7448 		goto fail_1;
   7449 	}
   7450 
   7451 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7452 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7453 		aprint_error_dev(sc->sc_dev,
   7454 		    "unable to create RX control data DMA map, error = %d\n",
   7455 		    error);
   7456 		goto fail_2;
   7457 	}
   7458 
   7459 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7460 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7461 		aprint_error_dev(sc->sc_dev,
   7462 		    "unable to load RX control data DMA map, error = %d\n",
   7463 		    error);
   7464 		goto fail_3;
   7465 	}
   7466 
   7467 	return 0;
   7468 
   7469  fail_3:
   7470 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7471  fail_2:
   7472 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7473 	    rxq_descs_size);
   7474  fail_1:
   7475 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7476  fail_0:
   7477 	return error;
   7478 }
   7479 
   7480 static void
   7481 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7482 {
   7483 
   7484 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7485 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7486 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7487 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7488 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7489 }
   7490 
   7491 
   7492 static int
   7493 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7494 {
   7495 	int i, error;
   7496 
   7497 	/* Create the transmit buffer DMA maps. */
   7498 	WM_TXQUEUELEN(txq) =
   7499 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7500 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7501 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7502 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7503 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7504 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7505 			aprint_error_dev(sc->sc_dev,
   7506 			    "unable to create Tx DMA map %d, error = %d\n",
   7507 			    i, error);
   7508 			goto fail;
   7509 		}
   7510 	}
   7511 
   7512 	return 0;
   7513 
   7514  fail:
   7515 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7516 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7517 			bus_dmamap_destroy(sc->sc_dmat,
   7518 			    txq->txq_soft[i].txs_dmamap);
   7519 	}
   7520 	return error;
   7521 }
   7522 
   7523 static void
   7524 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7525 {
   7526 	int i;
   7527 
   7528 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7529 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7530 			bus_dmamap_destroy(sc->sc_dmat,
   7531 			    txq->txq_soft[i].txs_dmamap);
   7532 	}
   7533 }
   7534 
   7535 static int
   7536 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7537 {
   7538 	int i, error;
   7539 
   7540 	/* Create the receive buffer DMA maps. */
   7541 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7542 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7543 			    MCLBYTES, 0, 0,
   7544 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7545 			aprint_error_dev(sc->sc_dev,
   7546 			    "unable to create Rx DMA map %d error = %d\n",
   7547 			    i, error);
   7548 			goto fail;
   7549 		}
   7550 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7551 	}
   7552 
   7553 	return 0;
   7554 
   7555  fail:
   7556 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7557 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7558 			bus_dmamap_destroy(sc->sc_dmat,
   7559 			    rxq->rxq_soft[i].rxs_dmamap);
   7560 	}
   7561 	return error;
   7562 }
   7563 
   7564 static void
   7565 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7566 {
   7567 	int i;
   7568 
   7569 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7570 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7571 			bus_dmamap_destroy(sc->sc_dmat,
   7572 			    rxq->rxq_soft[i].rxs_dmamap);
   7573 	}
   7574 }
   7575 
   7576 /*
    7577  * wm_alloc_txrx_queues:
    7578  *	Allocate {Tx,Rx} descriptors and {Tx,Rx} buffers.
   7579  */
   7580 static int
   7581 wm_alloc_txrx_queues(struct wm_softc *sc)
   7582 {
   7583 	int i, error, tx_done, rx_done;
   7584 
   7585 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7586 	    KM_SLEEP);
   7587 	if (sc->sc_queue == NULL) {
    7588 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7589 		error = ENOMEM;
   7590 		goto fail_0;
   7591 	}
   7592 
   7593 	/* For transmission */
   7594 	error = 0;
   7595 	tx_done = 0;
   7596 	for (i = 0; i < sc->sc_nqueues; i++) {
   7597 #ifdef WM_EVENT_COUNTERS
   7598 		int j;
   7599 		const char *xname;
   7600 #endif
   7601 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7602 		txq->txq_sc = sc;
   7603 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7604 
   7605 		error = wm_alloc_tx_descs(sc, txq);
   7606 		if (error)
   7607 			break;
   7608 		error = wm_alloc_tx_buffer(sc, txq);
   7609 		if (error) {
   7610 			wm_free_tx_descs(sc, txq);
   7611 			break;
   7612 		}
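         		/*
         		 * txq_interq is the software Tx queue used by the
         		 * if_transmit path (wm_transmit()); the wm_start path
         		 * dequeues from ifp->if_snd instead.
         		 */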
   7613 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7614 		if (txq->txq_interq == NULL) {
   7615 			wm_free_tx_descs(sc, txq);
   7616 			wm_free_tx_buffer(sc, txq);
   7617 			error = ENOMEM;
   7618 			break;
   7619 		}
   7620 
   7621 #ifdef WM_EVENT_COUNTERS
   7622 		xname = device_xname(sc->sc_dev);
   7623 
   7624 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7625 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7626 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7627 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7628 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7629 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7630 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7631 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7632 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7633 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7634 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7635 
   7636 		for (j = 0; j < WM_NTXSEGS; j++) {
   7637 			snprintf(txq->txq_txseg_evcnt_names[j],
   7638 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   7639 			    "txq%02dtxseg%d", i, j);
   7640 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   7641 			    EVCNT_TYPE_MISC,
   7642 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7643 		}
   7644 
   7645 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7646 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7647 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7648 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7649 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7650 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7651 #endif /* WM_EVENT_COUNTERS */
   7652 
   7653 		tx_done++;
   7654 	}
   7655 	if (error)
   7656 		goto fail_1;
   7657 
   7658 	/* For receive */
   7659 	error = 0;
   7660 	rx_done = 0;
   7661 	for (i = 0; i < sc->sc_nqueues; i++) {
   7662 #ifdef WM_EVENT_COUNTERS
   7663 		const char *xname;
   7664 #endif
   7665 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7666 		rxq->rxq_sc = sc;
   7667 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7668 
   7669 		error = wm_alloc_rx_descs(sc, rxq);
   7670 		if (error)
   7671 			break;
   7672 
   7673 		error = wm_alloc_rx_buffer(sc, rxq);
   7674 		if (error) {
   7675 			wm_free_rx_descs(sc, rxq);
   7676 			break;
   7677 		}
   7678 
   7679 #ifdef WM_EVENT_COUNTERS
   7680 		xname = device_xname(sc->sc_dev);
   7681 
   7682 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7683 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7684 
   7685 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7686 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7687 #endif /* WM_EVENT_COUNTERS */
   7688 
   7689 		rx_done++;
   7690 	}
   7691 	if (error)
   7692 		goto fail_2;
   7693 
   7694 	return 0;
   7695 
   7696  fail_2:
   7697 	for (i = 0; i < rx_done; i++) {
   7698 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7699 		wm_free_rx_buffer(sc, rxq);
   7700 		wm_free_rx_descs(sc, rxq);
   7701 		if (rxq->rxq_lock)
   7702 			mutex_obj_free(rxq->rxq_lock);
   7703 	}
   7704  fail_1:
   7705 	for (i = 0; i < tx_done; i++) {
   7706 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7707 		pcq_destroy(txq->txq_interq);
   7708 		wm_free_tx_buffer(sc, txq);
   7709 		wm_free_tx_descs(sc, txq);
   7710 		if (txq->txq_lock)
   7711 			mutex_obj_free(txq->txq_lock);
   7712 	}
   7713 
   7714 	kmem_free(sc->sc_queue,
   7715 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7716  fail_0:
   7717 	return error;
   7718 }
   7719 
   7720 /*
    7721  * wm_free_txrx_queues:
    7722  *	Free {tx,rx} descriptors and {tx,rx} buffers
   7723  */
   7724 static void
   7725 wm_free_txrx_queues(struct wm_softc *sc)
   7726 {
   7727 	int i;
   7728 
   7729 	for (i = 0; i < sc->sc_nqueues; i++) {
   7730 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7731 
   7732 #ifdef WM_EVENT_COUNTERS
   7733 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7734 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7735 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7736 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7737 #endif /* WM_EVENT_COUNTERS */
   7738 
   7739 		wm_free_rx_buffer(sc, rxq);
   7740 		wm_free_rx_descs(sc, rxq);
   7741 		if (rxq->rxq_lock)
   7742 			mutex_obj_free(rxq->rxq_lock);
   7743 	}
   7744 
   7745 	for (i = 0; i < sc->sc_nqueues; i++) {
   7746 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7747 		struct mbuf *m;
   7748 #ifdef WM_EVENT_COUNTERS
   7749 		int j;
   7750 
   7751 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7752 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7753 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7754 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7755 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7756 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7757 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7758 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7759 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7760 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7761 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7762 
   7763 		for (j = 0; j < WM_NTXSEGS; j++)
   7764 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7765 
   7766 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7767 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7768 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7769 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7770 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7771 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7772 #endif /* WM_EVENT_COUNTERS */
   7773 
   7774 		/* Drain txq_interq */
   7775 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7776 			m_freem(m);
   7777 		pcq_destroy(txq->txq_interq);
   7778 
   7779 		wm_free_tx_buffer(sc, txq);
   7780 		wm_free_tx_descs(sc, txq);
   7781 		if (txq->txq_lock)
   7782 			mutex_obj_free(txq->txq_lock);
   7783 	}
   7784 
   7785 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7786 }
   7787 
   7788 static void
   7789 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7790 {
   7791 
   7792 	KASSERT(mutex_owned(txq->txq_lock));
   7793 
   7794 	/* Initialize the transmit descriptor ring. */
   7795 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7796 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7797 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7798 	txq->txq_free = WM_NTXDESC(txq);
   7799 	txq->txq_next = 0;
   7800 }
   7801 
   7802 static void
   7803 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7804     struct wm_txqueue *txq)
   7805 {
   7806 
   7807 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7808 		device_xname(sc->sc_dev), __func__));
   7809 	KASSERT(mutex_owned(txq->txq_lock));
   7810 
   7811 	if (sc->sc_type < WM_T_82543) {
   7812 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7813 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7814 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7815 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7816 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7817 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7818 	} else {
   7819 		int qid = wmq->wmq_id;
   7820 
   7821 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7822 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7823 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7824 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7825 
   7826 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7827 			/*
   7828 			 * Don't write TDT before TCTL.EN is set.
    7829 			 * See the documentation.
   7830 			 */
   7831 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7832 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7833 			    | TXDCTL_WTHRESH(0));
   7834 		else {
   7835 			/* XXX should update with AIM? */
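         			/*
         			 * wmq_itr appears to be kept in ITR units of
         			 * 256 ns, while TIDV/TADV tick in 1.024 us
         			 * steps, which would account for the division
         			 * by 4.
         			 */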
   7836 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7837 			if (sc->sc_type >= WM_T_82540) {
    7838 				/* Should be the same as TIDV */
   7839 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7840 			}
   7841 
   7842 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7843 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7844 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7845 		}
   7846 	}
   7847 }
   7848 
   7849 static void
   7850 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7851 {
   7852 	int i;
   7853 
   7854 	KASSERT(mutex_owned(txq->txq_lock));
   7855 
   7856 	/* Initialize the transmit job descriptors. */
   7857 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7858 		txq->txq_soft[i].txs_mbuf = NULL;
   7859 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7860 	txq->txq_snext = 0;
   7861 	txq->txq_sdirty = 0;
   7862 }
   7863 
   7864 static void
   7865 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7866     struct wm_txqueue *txq)
   7867 {
   7868 
   7869 	KASSERT(mutex_owned(txq->txq_lock));
   7870 
   7871 	/*
   7872 	 * Set up some register offsets that are different between
   7873 	 * the i82542 and the i82543 and later chips.
   7874 	 */
   7875 	if (sc->sc_type < WM_T_82543)
   7876 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7877 	else
   7878 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7879 
   7880 	wm_init_tx_descs(sc, txq);
   7881 	wm_init_tx_regs(sc, wmq, txq);
   7882 	wm_init_tx_buffer(sc, txq);
   7883 
    7884 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD. */
   7885 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7886 
   7887 	txq->txq_sending = false;
   7888 }
   7889 
   7890 static void
   7891 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7892     struct wm_rxqueue *rxq)
   7893 {
   7894 
   7895 	KASSERT(mutex_owned(rxq->rxq_lock));
   7896 
   7897 	/*
   7898 	 * Initialize the receive descriptor and receive job
   7899 	 * descriptor rings.
   7900 	 */
   7901 	if (sc->sc_type < WM_T_82543) {
   7902 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7903 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7904 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7905 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7906 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7907 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7908 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7909 
   7910 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7911 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7912 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7913 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7914 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7915 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7916 	} else {
   7917 		int qid = wmq->wmq_id;
   7918 
   7919 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7920 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7921 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7922 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7923 
   7924 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7925 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7926 				panic("%s: MCLBYTES %d unsupported for 82575 "
   7927 				    "or higher\n", __func__, MCLBYTES);
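         			/*
         			 * SRRCTL.BSIZEPKT counts the buffer size in
         			 * units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
         			 * so MCLBYTES must be a multiple of that unit.
         			 */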
   7928 
   7929 			/*
   7930 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
   7931 			 * only.
   7932 			 */
   7933 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   7934 			    SRRCTL_DESCTYPE_ADV_ONEBUF
   7935 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7936 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7937 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7938 			    | RXDCTL_WTHRESH(1));
   7939 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7940 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7941 		} else {
   7942 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7943 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7944 			/* XXX should update with AIM? */
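         			/*
         			 * As with TIDV/TADV above, wmq_itr appears to
         			 * be in 256 ns ITR units while RDTR/RADV tick
         			 * in 1.024 us steps, hence the division by 4.
         			 */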
   7945 			CSR_WRITE(sc, WMREG_RDTR,
   7946 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    7947 			/* MUST be the same as RDTR */
   7948 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7949 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7950 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7951 		}
   7952 	}
   7953 }
   7954 
   7955 static int
   7956 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7957 {
   7958 	struct wm_rxsoft *rxs;
   7959 	int error, i;
   7960 
   7961 	KASSERT(mutex_owned(rxq->rxq_lock));
   7962 
   7963 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7964 		rxs = &rxq->rxq_soft[i];
   7965 		if (rxs->rxs_mbuf == NULL) {
   7966 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7967 				log(LOG_ERR, "%s: unable to allocate or map "
   7968 				    "rx buffer %d, error = %d\n",
   7969 				    device_xname(sc->sc_dev), i, error);
   7970 				/*
   7971 				 * XXX Should attempt to run with fewer receive
   7972 				 * XXX buffers instead of just failing.
   7973 				 */
   7974 				wm_rxdrain(rxq);
   7975 				return ENOMEM;
   7976 			}
   7977 		} else {
   7978 			/*
   7979 			 * For 82575 and 82576, the RX descriptors must be
   7980 			 * initialized after the setting of RCTL.EN in
   7981 			 * wm_set_filter()
   7982 			 */
   7983 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7984 				wm_init_rxdesc(rxq, i);
   7985 		}
   7986 	}
   7987 	rxq->rxq_ptr = 0;
   7988 	rxq->rxq_discard = 0;
   7989 	WM_RXCHAIN_RESET(rxq);
   7990 
   7991 	return 0;
   7992 }
   7993 
   7994 static int
   7995 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7996     struct wm_rxqueue *rxq)
   7997 {
   7998 
   7999 	KASSERT(mutex_owned(rxq->rxq_lock));
   8000 
   8001 	/*
   8002 	 * Set up some register offsets that are different between
   8003 	 * the i82542 and the i82543 and later chips.
   8004 	 */
   8005 	if (sc->sc_type < WM_T_82543)
   8006 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   8007 	else
   8008 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   8009 
   8010 	wm_init_rx_regs(sc, wmq, rxq);
   8011 	return wm_init_rx_buffer(sc, rxq);
   8012 }
   8013 
   8014 /*
    8015  * wm_init_txrx_queues:
    8016  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   8017  */
   8018 static int
   8019 wm_init_txrx_queues(struct wm_softc *sc)
   8020 {
   8021 	int i, error = 0;
   8022 
   8023 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   8024 		device_xname(sc->sc_dev), __func__));
   8025 
   8026 	for (i = 0; i < sc->sc_nqueues; i++) {
   8027 		struct wm_queue *wmq = &sc->sc_queue[i];
   8028 		struct wm_txqueue *txq = &wmq->wmq_txq;
   8029 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8030 
   8031 		/*
   8032 		 * TODO
    8033 		 * Currently, we use a constant value instead of AIM
    8034 		 * (adaptive interrupt moderation). Furthermore, the interrupt
    8035 		 * interval for multiqueue (which uses polling mode) is shorter
    8036 		 * than the default value. More tuning, and AIM, are required.
   8037 		 */
   8038 		if (wm_is_using_multiqueue(sc))
   8039 			wmq->wmq_itr = 50;
   8040 		else
   8041 			wmq->wmq_itr = sc->sc_itr_init;
   8042 		wmq->wmq_set_itr = true;
   8043 
   8044 		mutex_enter(txq->txq_lock);
   8045 		wm_init_tx_queue(sc, wmq, txq);
   8046 		mutex_exit(txq->txq_lock);
   8047 
   8048 		mutex_enter(rxq->rxq_lock);
   8049 		error = wm_init_rx_queue(sc, wmq, rxq);
   8050 		mutex_exit(rxq->rxq_lock);
   8051 		if (error)
   8052 			break;
   8053 	}
   8054 
   8055 	return error;
   8056 }
   8057 
   8058 /*
   8059  * wm_tx_offload:
   8060  *
   8061  *	Set up TCP/IP checksumming parameters for the
   8062  *	specified packet.
   8063  */
   8064 static void
   8065 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8066     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   8067 {
   8068 	struct mbuf *m0 = txs->txs_mbuf;
   8069 	struct livengood_tcpip_ctxdesc *t;
   8070 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   8071 	uint32_t ipcse;
   8072 	struct ether_header *eh;
   8073 	int offset, iphl;
   8074 	uint8_t fields;
   8075 
   8076 	/*
   8077 	 * XXX It would be nice if the mbuf pkthdr had offset
   8078 	 * fields for the protocol headers.
   8079 	 */
   8080 
   8081 	eh = mtod(m0, struct ether_header *);
   8082 	switch (htons(eh->ether_type)) {
   8083 	case ETHERTYPE_IP:
   8084 	case ETHERTYPE_IPV6:
   8085 		offset = ETHER_HDR_LEN;
   8086 		break;
   8087 
   8088 	case ETHERTYPE_VLAN:
   8089 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8090 		break;
   8091 
   8092 	default:
   8093 		/* Don't support this protocol or encapsulation. */
   8094 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8095 		txq->txq_last_hw_ipcs = 0;
   8096 		txq->txq_last_hw_tucs = 0;
   8097 		*fieldsp = 0;
   8098 		*cmdp = 0;
   8099 		return;
   8100 	}
   8101 
   8102 	if ((m0->m_pkthdr.csum_flags &
   8103 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8104 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8105 	} else
   8106 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8107 
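         	/*
         	 * IPCSE holds the inclusive offset of the last byte covered
         	 * by the IP header checksum, hence the "- 1".
         	 */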
   8108 	ipcse = offset + iphl - 1;
   8109 
   8110 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   8111 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   8112 	seg = 0;
   8113 	fields = 0;
   8114 
   8115 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8116 		int hlen = offset + iphl;
   8117 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8118 
   8119 		if (__predict_false(m0->m_len <
   8120 				    (hlen + sizeof(struct tcphdr)))) {
   8121 			/*
   8122 			 * TCP/IP headers are not in the first mbuf; we need
   8123 			 * to do this the slow and painful way. Let's just
   8124 			 * hope this doesn't happen very often.
   8125 			 */
   8126 			struct tcphdr th;
   8127 
   8128 			WM_Q_EVCNT_INCR(txq, tsopain);
   8129 
   8130 			m_copydata(m0, hlen, sizeof(th), &th);
   8131 			if (v4) {
   8132 				struct ip ip;
   8133 
   8134 				m_copydata(m0, offset, sizeof(ip), &ip);
   8135 				ip.ip_len = 0;
   8136 				m_copyback(m0,
   8137 				    offset + offsetof(struct ip, ip_len),
   8138 				    sizeof(ip.ip_len), &ip.ip_len);
   8139 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8140 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8141 			} else {
   8142 				struct ip6_hdr ip6;
   8143 
   8144 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8145 				ip6.ip6_plen = 0;
   8146 				m_copyback(m0,
   8147 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8148 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8149 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8150 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8151 			}
   8152 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8153 			    sizeof(th.th_sum), &th.th_sum);
   8154 
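         			/* th_off is in units of 32-bit words. */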
   8155 			hlen += th.th_off << 2;
   8156 		} else {
   8157 			/*
   8158 			 * TCP/IP headers are in the first mbuf; we can do
   8159 			 * this the easy way.
   8160 			 */
   8161 			struct tcphdr *th;
   8162 
   8163 			if (v4) {
   8164 				struct ip *ip =
   8165 				    (void *)(mtod(m0, char *) + offset);
   8166 				th = (void *)(mtod(m0, char *) + hlen);
   8167 
   8168 				ip->ip_len = 0;
   8169 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8170 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8171 			} else {
   8172 				struct ip6_hdr *ip6 =
   8173 				    (void *)(mtod(m0, char *) + offset);
   8174 				th = (void *)(mtod(m0, char *) + hlen);
   8175 
   8176 				ip6->ip6_plen = 0;
   8177 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8178 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8179 			}
   8180 			hlen += th->th_off << 2;
   8181 		}
   8182 
   8183 		if (v4) {
   8184 			WM_Q_EVCNT_INCR(txq, tso);
   8185 			cmdlen |= WTX_TCPIP_CMD_IP;
   8186 		} else {
   8187 			WM_Q_EVCNT_INCR(txq, tso6);
   8188 			ipcse = 0;
   8189 		}
   8190 		cmd |= WTX_TCPIP_CMD_TSE;
   8191 		cmdlen |= WTX_TCPIP_CMD_TSE |
   8192 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   8193 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   8194 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   8195 	}
   8196 
   8197 	/*
   8198 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   8199 	 * offload feature, if we load the context descriptor, we
   8200 	 * MUST provide valid values for IPCSS and TUCSS fields.
   8201 	 */
   8202 
   8203 	ipcs = WTX_TCPIP_IPCSS(offset) |
   8204 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   8205 	    WTX_TCPIP_IPCSE(ipcse);
   8206 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   8207 		WM_Q_EVCNT_INCR(txq, ipsum);
   8208 		fields |= WTX_IXSM;
   8209 	}
   8210 
   8211 	offset += iphl;
   8212 
   8213 	if (m0->m_pkthdr.csum_flags &
   8214 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   8215 		WM_Q_EVCNT_INCR(txq, tusum);
   8216 		fields |= WTX_TXSM;
   8217 		tucs = WTX_TCPIP_TUCSS(offset) |
   8218 		    WTX_TCPIP_TUCSO(offset +
   8219 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   8220 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8221 	} else if ((m0->m_pkthdr.csum_flags &
   8222 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   8223 		WM_Q_EVCNT_INCR(txq, tusum6);
   8224 		fields |= WTX_TXSM;
   8225 		tucs = WTX_TCPIP_TUCSS(offset) |
   8226 		    WTX_TCPIP_TUCSO(offset +
   8227 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   8228 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8229 	} else {
   8230 		/* Just initialize it to a valid TCP context. */
   8231 		tucs = WTX_TCPIP_TUCSS(offset) |
   8232 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   8233 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   8234 	}
   8235 
   8236 	*cmdp = cmd;
   8237 	*fieldsp = fields;
   8238 
   8239 	/*
   8240 	 * We don't have to write context descriptor for every packet
   8241 	 * except for 82574. For 82574, we must write context descriptor
   8242 	 * for every packet when we use two descriptor queues.
   8243 	 *
   8244 	 * The 82574L can only remember the *last* context used
    8245 	 * regardless of the queue it was used for. We cannot reuse
   8246 	 * contexts on this hardware platform and must generate a new
   8247 	 * context every time.  82574L hardware spec, section 7.2.6,
   8248 	 * second note.
   8249 	 */
   8250 	if (sc->sc_nqueues < 2) {
   8251 		/*
    8252 		 * Setting up a new checksum offload context for every
    8253 		 * frame takes a lot of processing time for the hardware.
    8254 		 * This also reduces performance a lot for small sized
    8255 		 * frames, so avoid it if the driver can reuse a previously
    8256 		 * configured checksum offload context.
    8257 		 * For TSO, in theory we could reuse the same TSO context
    8258 		 * only if the frame is the same type (IP/TCP) and has the
    8259 		 * same MSS. However, checking whether a frame has the same
    8260 		 * IP/TCP structure is hard, so just ignore that and always
    8261 		 * re-establish a new TSO context.
   8262 		 */
   8263 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   8264 		    == 0) {
   8265 			if (txq->txq_last_hw_cmd == cmd &&
   8266 			    txq->txq_last_hw_fields == fields &&
   8267 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   8268 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   8269 				WM_Q_EVCNT_INCR(txq, skipcontext);
   8270 				return;
   8271 			}
   8272 		}
   8273 
   8274 		txq->txq_last_hw_cmd = cmd;
   8275 		txq->txq_last_hw_fields = fields;
   8276 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   8277 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   8278 	}
   8279 
   8280 	/* Fill in the context descriptor. */
   8281 	t = (struct livengood_tcpip_ctxdesc *)
   8282 	    &txq->txq_descs[txq->txq_next];
   8283 	t->tcpip_ipcs = htole32(ipcs);
   8284 	t->tcpip_tucs = htole32(tucs);
   8285 	t->tcpip_cmdlen = htole32(cmdlen);
   8286 	t->tcpip_seg = htole32(seg);
   8287 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8288 
   8289 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8290 	txs->txs_ndesc++;
   8291 }
   8292 
   8293 static inline int
   8294 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   8295 {
   8296 	struct wm_softc *sc = ifp->if_softc;
   8297 	u_int cpuid = cpu_index(curcpu());
   8298 
   8299 	/*
    8300 	 * Currently, a simple distribution strategy.
    8301 	 * TODO:
    8302 	 * Distribute by flow ID (the RSS hash value).
   8303 	 */
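         	/*
         	 * Adding ncpu before subtracting sc_affinity_offset keeps
         	 * the unsigned arithmetic from wrapping (assuming the offset
         	 * never exceeds ncpu), so the outer modulo yields a valid
         	 * queue index.
         	 */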
   8304 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   8305 }
   8306 
   8307 static inline bool
   8308 wm_linkdown_discard(struct wm_txqueue *txq)
   8309 {
   8310 
   8311 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   8312 		return true;
   8313 
   8314 	return false;
   8315 }
   8316 
   8317 /*
   8318  * wm_start:		[ifnet interface function]
   8319  *
   8320  *	Start packet transmission on the interface.
   8321  */
   8322 static void
   8323 wm_start(struct ifnet *ifp)
   8324 {
   8325 	struct wm_softc *sc = ifp->if_softc;
   8326 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8327 
   8328 #ifdef WM_MPSAFE
   8329 	KASSERT(if_is_mpsafe(ifp));
   8330 #endif
   8331 	/*
   8332 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8333 	 */
   8334 
   8335 	mutex_enter(txq->txq_lock);
   8336 	if (!txq->txq_stopping)
   8337 		wm_start_locked(ifp);
   8338 	mutex_exit(txq->txq_lock);
   8339 }
   8340 
   8341 static void
   8342 wm_start_locked(struct ifnet *ifp)
   8343 {
   8344 	struct wm_softc *sc = ifp->if_softc;
   8345 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8346 
   8347 	wm_send_common_locked(ifp, txq, false);
   8348 }
   8349 
   8350 static int
   8351 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   8352 {
   8353 	int qid;
   8354 	struct wm_softc *sc = ifp->if_softc;
   8355 	struct wm_txqueue *txq;
   8356 
   8357 	qid = wm_select_txqueue(ifp, m);
   8358 	txq = &sc->sc_queue[qid].wmq_txq;
   8359 
   8360 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8361 		m_freem(m);
   8362 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8363 		return ENOBUFS;
   8364 	}
   8365 
   8366 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8367 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8368 	if (m->m_flags & M_MCAST)
   8369 		if_statinc_ref(nsr, if_omcasts);
   8370 	IF_STAT_PUTREF(ifp);
   8371 
   8372 	if (mutex_tryenter(txq->txq_lock)) {
   8373 		if (!txq->txq_stopping)
   8374 			wm_transmit_locked(ifp, txq);
   8375 		mutex_exit(txq->txq_lock);
   8376 	}
   8377 
   8378 	return 0;
   8379 }
   8380 
   8381 static void
   8382 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8383 {
   8384 
   8385 	wm_send_common_locked(ifp, txq, true);
   8386 }
   8387 
   8388 static void
   8389 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8390     bool is_transmit)
   8391 {
   8392 	struct wm_softc *sc = ifp->if_softc;
   8393 	struct mbuf *m0;
   8394 	struct wm_txsoft *txs;
   8395 	bus_dmamap_t dmamap;
   8396 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8397 	bus_addr_t curaddr;
   8398 	bus_size_t seglen, curlen;
   8399 	uint32_t cksumcmd;
   8400 	uint8_t cksumfields;
   8401 	bool remap = true;
   8402 
   8403 	KASSERT(mutex_owned(txq->txq_lock));
   8404 	KASSERT(!txq->txq_stopping);
   8405 
   8406 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8407 		return;
   8408 
   8409 	if (__predict_false(wm_linkdown_discard(txq))) {
   8410 		do {
   8411 			if (is_transmit)
   8412 				m0 = pcq_get(txq->txq_interq);
   8413 			else
   8414 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8415 			/*
    8416 			 * Count the packet as successfully sent, as if it
    8417 			 * had been discarded by a link-down PHY.
   8418 			 */
   8419 			if (m0 != NULL) {
   8420 				if_statinc(ifp, if_opackets);
   8421 				m_freem(m0);
   8422 			}
   8423 		} while (m0 != NULL);
   8424 		return;
   8425 	}
   8426 
   8427 	/* Remember the previous number of free descriptors. */
   8428 	ofree = txq->txq_free;
   8429 
   8430 	/*
   8431 	 * Loop through the send queue, setting up transmit descriptors
   8432 	 * until we drain the queue, or use up all available transmit
   8433 	 * descriptors.
   8434 	 */
   8435 	for (;;) {
   8436 		m0 = NULL;
   8437 
   8438 		/* Get a work queue entry. */
   8439 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8440 			wm_txeof(txq, UINT_MAX);
   8441 			if (txq->txq_sfree == 0) {
   8442 				DPRINTF(sc, WM_DEBUG_TX,
   8443 				    ("%s: TX: no free job descriptors\n",
   8444 					device_xname(sc->sc_dev)));
   8445 				WM_Q_EVCNT_INCR(txq, txsstall);
   8446 				break;
   8447 			}
   8448 		}
   8449 
   8450 		/* Grab a packet off the queue. */
   8451 		if (is_transmit)
   8452 			m0 = pcq_get(txq->txq_interq);
   8453 		else
   8454 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8455 		if (m0 == NULL)
   8456 			break;
   8457 
   8458 		DPRINTF(sc, WM_DEBUG_TX,
   8459 		    ("%s: TX: have packet to transmit: %p\n",
   8460 			device_xname(sc->sc_dev), m0));
   8461 
   8462 		txs = &txq->txq_soft[txq->txq_snext];
   8463 		dmamap = txs->txs_dmamap;
   8464 
   8465 		use_tso = (m0->m_pkthdr.csum_flags &
   8466 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8467 
   8468 		/*
   8469 		 * So says the Linux driver:
   8470 		 * The controller does a simple calculation to make sure
   8471 		 * there is enough room in the FIFO before initiating the
   8472 		 * DMA for each buffer. The calc is:
   8473 		 *	4 = ceil(buffer len / MSS)
   8474 		 * To make sure we don't overrun the FIFO, adjust the max
   8475 		 * buffer len if the MSS drops.
   8476 		 */
   8477 		dmamap->dm_maxsegsz =
   8478 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8479 		    ? m0->m_pkthdr.segsz << 2
   8480 		    : WTX_MAX_LEN;
   8481 
   8482 		/*
   8483 		 * Load the DMA map.  If this fails, the packet either
   8484 		 * didn't fit in the allotted number of segments, or we
   8485 		 * were short on resources.  For the too-many-segments
   8486 		 * case, we simply report an error and drop the packet,
   8487 		 * since we can't sanely copy a jumbo packet to a single
   8488 		 * buffer.
   8489 		 */
   8490 retry:
   8491 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8492 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8493 		if (__predict_false(error)) {
   8494 			if (error == EFBIG) {
   8495 				if (remap == true) {
   8496 					struct mbuf *m;
   8497 
   8498 					remap = false;
   8499 					m = m_defrag(m0, M_NOWAIT);
   8500 					if (m != NULL) {
   8501 						WM_Q_EVCNT_INCR(txq, defrag);
   8502 						m0 = m;
   8503 						goto retry;
   8504 					}
   8505 				}
   8506 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8507 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8508 				    "DMA segments, dropping...\n",
   8509 				    device_xname(sc->sc_dev));
   8510 				wm_dump_mbuf_chain(sc, m0);
   8511 				m_freem(m0);
   8512 				continue;
   8513 			}
   8514 			/* Short on resources, just stop for now. */
   8515 			DPRINTF(sc, WM_DEBUG_TX,
   8516 			    ("%s: TX: dmamap load failed: %d\n",
   8517 				device_xname(sc->sc_dev), error));
   8518 			break;
   8519 		}
   8520 
   8521 		segs_needed = dmamap->dm_nsegs;
   8522 		if (use_tso) {
   8523 			/* For sentinel descriptor; see below. */
   8524 			segs_needed++;
   8525 		}
   8526 
   8527 		/*
   8528 		 * Ensure we have enough descriptors free to describe
   8529 		 * the packet. Note, we always reserve one descriptor
   8530 		 * at the end of the ring due to the semantics of the
   8531 		 * TDT register, plus one more in the event we need
   8532 		 * to load offload context.
   8533 		 */
   8534 		if (segs_needed > txq->txq_free - 2) {
   8535 			/*
   8536 			 * Not enough free descriptors to transmit this
   8537 			 * packet.  We haven't committed anything yet,
   8538 			 * so just unload the DMA map, put the packet
    8539 			 * back on the queue, and punt. Notify the upper
   8540 			 * layer that there are no more slots left.
   8541 			 */
   8542 			DPRINTF(sc, WM_DEBUG_TX,
   8543 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8544 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8545 				segs_needed, txq->txq_free - 1));
   8546 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8547 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8548 			WM_Q_EVCNT_INCR(txq, txdstall);
   8549 			break;
   8550 		}
   8551 
   8552 		/*
   8553 		 * Check for 82547 Tx FIFO bug. We need to do this
   8554 		 * once we know we can transmit the packet, since we
   8555 		 * do some internal FIFO space accounting here.
   8556 		 */
   8557 		if (sc->sc_type == WM_T_82547 &&
   8558 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8559 			DPRINTF(sc, WM_DEBUG_TX,
   8560 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8561 				device_xname(sc->sc_dev)));
   8562 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8563 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8564 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8565 			break;
   8566 		}
   8567 
   8568 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8569 
   8570 		DPRINTF(sc, WM_DEBUG_TX,
   8571 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8572 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8573 
   8574 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8575 
   8576 		/*
   8577 		 * Store a pointer to the packet so that we can free it
   8578 		 * later.
   8579 		 *
   8580 		 * Initially, we consider the number of descriptors the
    8581 		 * packet uses to be the number of DMA segments.  This may be
   8582 		 * incremented by 1 if we do checksum offload (a descriptor
   8583 		 * is used to set the checksum context).
   8584 		 */
   8585 		txs->txs_mbuf = m0;
   8586 		txs->txs_firstdesc = txq->txq_next;
   8587 		txs->txs_ndesc = segs_needed;
   8588 
   8589 		/* Set up offload parameters for this packet. */
   8590 		if (m0->m_pkthdr.csum_flags &
   8591 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8592 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8593 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8594 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8595 		} else {
   8596 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8597 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8598 			cksumcmd = 0;
   8599 			cksumfields = 0;
   8600 		}
   8601 
   8602 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
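         		/*
         		 * IDE delays the Tx completion interrupt (per TIDV);
         		 * IFCS makes the chip insert the Ethernet FCS.
         		 */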
   8603 
   8604 		/* Sync the DMA map. */
   8605 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8606 		    BUS_DMASYNC_PREWRITE);
   8607 
   8608 		/* Initialize the transmit descriptor. */
   8609 		for (nexttx = txq->txq_next, seg = 0;
   8610 		     seg < dmamap->dm_nsegs; seg++) {
   8611 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8612 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8613 			     seglen != 0;
   8614 			     curaddr += curlen, seglen -= curlen,
   8615 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8616 				curlen = seglen;
   8617 
   8618 				/*
   8619 				 * So says the Linux driver:
   8620 				 * Work around for premature descriptor
   8621 				 * write-backs in TSO mode.  Append a
   8622 				 * 4-byte sentinel descriptor.
   8623 				 */
   8624 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8625 				    curlen > 8)
   8626 					curlen -= 4;
   8627 
   8628 				wm_set_dma_addr(
   8629 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8630 				txq->txq_descs[nexttx].wtx_cmdlen
   8631 				    = htole32(cksumcmd | curlen);
   8632 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8633 				    = 0;
   8634 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8635 				    = cksumfields;
    8636 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8637 				lasttx = nexttx;
   8638 
   8639 				DPRINTF(sc, WM_DEBUG_TX,
   8640 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8641 					"len %#04zx\n",
   8642 					device_xname(sc->sc_dev), nexttx,
   8643 					(uint64_t)curaddr, curlen));
   8644 			}
   8645 		}
   8646 
   8647 		KASSERT(lasttx != -1);
   8648 
   8649 		/*
   8650 		 * Set up the command byte on the last descriptor of
   8651 		 * the packet. If we're in the interrupt delay window,
   8652 		 * delay the interrupt.
   8653 		 */
   8654 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8655 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8656 
   8657 		/*
   8658 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8659 		 * up the descriptor to encapsulate the packet for us.
   8660 		 *
   8661 		 * This is only valid on the last descriptor of the packet.
   8662 		 */
   8663 		if (vlan_has_tag(m0)) {
   8664 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8665 			    htole32(WTX_CMD_VLE);
   8666 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8667 			    = htole16(vlan_get_tag(m0));
   8668 		}
   8669 
   8670 		txs->txs_lastdesc = lasttx;
   8671 
   8672 		DPRINTF(sc, WM_DEBUG_TX,
   8673 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8674 			device_xname(sc->sc_dev),
   8675 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8676 
   8677 		/* Sync the descriptors we're using. */
   8678 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8679 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8680 
   8681 		/* Give the packet to the chip. */
   8682 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8683 
   8684 		DPRINTF(sc, WM_DEBUG_TX,
   8685 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8686 
   8687 		DPRINTF(sc, WM_DEBUG_TX,
   8688 		    ("%s: TX: finished transmitting packet, job %d\n",
   8689 			device_xname(sc->sc_dev), txq->txq_snext));
   8690 
   8691 		/* Advance the tx pointer. */
   8692 		txq->txq_free -= txs->txs_ndesc;
   8693 		txq->txq_next = nexttx;
   8694 
   8695 		txq->txq_sfree--;
   8696 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8697 
   8698 		/* Pass the packet to any BPF listeners. */
   8699 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8700 	}
   8701 
   8702 	if (m0 != NULL) {
   8703 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8704 		WM_Q_EVCNT_INCR(txq, descdrop);
   8705 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8706 			__func__));
   8707 		m_freem(m0);
   8708 	}
   8709 
   8710 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8711 		/* No more slots; notify upper layer. */
   8712 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8713 	}
   8714 
   8715 	if (txq->txq_free != ofree) {
   8716 		/* Set a watchdog timer in case the chip flakes out. */
   8717 		txq->txq_lastsent = time_uptime;
   8718 		txq->txq_sending = true;
   8719 	}
   8720 }
   8721 
   8722 /*
   8723  * wm_nq_tx_offload:
   8724  *
   8725  *	Set up TCP/IP checksumming parameters for the
   8726  *	specified packet, for NEWQUEUE devices
   8727  */
   8728 static void
   8729 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8730     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8731 {
   8732 	struct mbuf *m0 = txs->txs_mbuf;
   8733 	uint32_t vl_len, mssidx, cmdc;
   8734 	struct ether_header *eh;
   8735 	int offset, iphl;
   8736 
   8737 	/*
   8738 	 * XXX It would be nice if the mbuf pkthdr had offset
   8739 	 * fields for the protocol headers.
   8740 	 */
   8741 	*cmdlenp = 0;
   8742 	*fieldsp = 0;
   8743 
   8744 	eh = mtod(m0, struct ether_header *);
   8745 	switch (htons(eh->ether_type)) {
   8746 	case ETHERTYPE_IP:
   8747 	case ETHERTYPE_IPV6:
   8748 		offset = ETHER_HDR_LEN;
   8749 		break;
   8750 
   8751 	case ETHERTYPE_VLAN:
   8752 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8753 		break;
   8754 
   8755 	default:
   8756 		/* Don't support this protocol or encapsulation. */
   8757 		*do_csum = false;
   8758 		return;
   8759 	}
   8760 	*do_csum = true;
   8761 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8762 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8763 
   8764 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8765 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
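         	/*
         	 * vl_len accumulates the MAC header length, IP header length
         	 * and VLAN tag fields of the advanced context descriptor's
         	 * first dword.
         	 */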
   8766 
   8767 	if ((m0->m_pkthdr.csum_flags &
   8768 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8769 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8770 	} else {
   8771 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8772 	}
   8773 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8774 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8775 
   8776 	if (vlan_has_tag(m0)) {
   8777 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8778 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8779 		*cmdlenp |= NQTX_CMD_VLE;
   8780 	}
   8781 
   8782 	mssidx = 0;
   8783 
   8784 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8785 		int hlen = offset + iphl;
   8786 		int tcp_hlen;
   8787 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8788 
   8789 		if (__predict_false(m0->m_len <
   8790 				    (hlen + sizeof(struct tcphdr)))) {
   8791 			/*
   8792 			 * TCP/IP headers are not in the first mbuf; we need
   8793 			 * to do this the slow and painful way. Let's just
   8794 			 * hope this doesn't happen very often.
   8795 			 */
   8796 			struct tcphdr th;
   8797 
   8798 			WM_Q_EVCNT_INCR(txq, tsopain);
   8799 
   8800 			m_copydata(m0, hlen, sizeof(th), &th);
   8801 			if (v4) {
   8802 				struct ip ip;
   8803 
   8804 				m_copydata(m0, offset, sizeof(ip), &ip);
   8805 				ip.ip_len = 0;
   8806 				m_copyback(m0,
   8807 				    offset + offsetof(struct ip, ip_len),
   8808 				    sizeof(ip.ip_len), &ip.ip_len);
   8809 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8810 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8811 			} else {
   8812 				struct ip6_hdr ip6;
   8813 
   8814 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8815 				ip6.ip6_plen = 0;
   8816 				m_copyback(m0,
   8817 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8818 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8819 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8820 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8821 			}
   8822 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8823 			    sizeof(th.th_sum), &th.th_sum);
   8824 
   8825 			tcp_hlen = th.th_off << 2;
   8826 		} else {
   8827 			/*
   8828 			 * TCP/IP headers are in the first mbuf; we can do
   8829 			 * this the easy way.
   8830 			 */
   8831 			struct tcphdr *th;
   8832 
   8833 			if (v4) {
   8834 				struct ip *ip =
   8835 				    (void *)(mtod(m0, char *) + offset);
   8836 				th = (void *)(mtod(m0, char *) + hlen);
   8837 
   8838 				ip->ip_len = 0;
   8839 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8840 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8841 			} else {
   8842 				struct ip6_hdr *ip6 =
   8843 				    (void *)(mtod(m0, char *) + offset);
   8844 				th = (void *)(mtod(m0, char *) + hlen);
   8845 
   8846 				ip6->ip6_plen = 0;
   8847 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8848 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8849 			}
   8850 			tcp_hlen = th->th_off << 2;
   8851 		}
   8852 		hlen += tcp_hlen;
   8853 		*cmdlenp |= NQTX_CMD_TSE;
   8854 
   8855 		if (v4) {
   8856 			WM_Q_EVCNT_INCR(txq, tso);
   8857 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8858 		} else {
   8859 			WM_Q_EVCNT_INCR(txq, tso6);
   8860 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8861 		}
   8862 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8863 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8864 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8865 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8866 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8867 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8868 	} else {
   8869 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8870 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8871 	}
   8872 
   8873 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8874 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8875 		cmdc |= NQTXC_CMD_IP4;
   8876 	}
   8877 
   8878 	if (m0->m_pkthdr.csum_flags &
   8879 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8880 		WM_Q_EVCNT_INCR(txq, tusum);
   8881 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8882 			cmdc |= NQTXC_CMD_TCP;
   8883 		else
   8884 			cmdc |= NQTXC_CMD_UDP;
   8885 
   8886 		cmdc |= NQTXC_CMD_IP4;
   8887 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8888 	}
   8889 	if (m0->m_pkthdr.csum_flags &
   8890 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8891 		WM_Q_EVCNT_INCR(txq, tusum6);
   8892 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8893 			cmdc |= NQTXC_CMD_TCP;
   8894 		else
   8895 			cmdc |= NQTXC_CMD_UDP;
   8896 
   8897 		cmdc |= NQTXC_CMD_IP6;
   8898 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8899 	}
   8900 
   8901 	/*
    8902 	 * We don't have to write a context descriptor for every packet on
    8903 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    8904 	 * I210 and I211. Writing it once per Tx queue is enough for these
    8905 	 * controllers.
    8906 	 * Writing a context descriptor for every packet would add overhead,
    8907 	 * but it does not cause problems.
   8908 	 */
   8909 	/* Fill in the context descriptor. */
   8910 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8911 	    htole32(vl_len);
   8912 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8913 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8914 	    htole32(cmdc);
   8915 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8916 	    htole32(mssidx);
   8917 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8918 	DPRINTF(sc, WM_DEBUG_TX,
   8919 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8920 		txq->txq_next, 0, vl_len));
   8921 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8922 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8923 	txs->txs_ndesc++;
   8924 }
   8925 
   8926 /*
   8927  * wm_nq_start:		[ifnet interface function]
   8928  *
   8929  *	Start packet transmission on the interface for NEWQUEUE devices
   8930  */
   8931 static void
   8932 wm_nq_start(struct ifnet *ifp)
   8933 {
   8934 	struct wm_softc *sc = ifp->if_softc;
   8935 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8936 
   8937 #ifdef WM_MPSAFE
   8938 	KASSERT(if_is_mpsafe(ifp));
   8939 #endif
   8940 	/*
   8941 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8942 	 */
   8943 
   8944 	mutex_enter(txq->txq_lock);
   8945 	if (!txq->txq_stopping)
   8946 		wm_nq_start_locked(ifp);
   8947 	mutex_exit(txq->txq_lock);
   8948 }
   8949 
   8950 static void
   8951 wm_nq_start_locked(struct ifnet *ifp)
   8952 {
   8953 	struct wm_softc *sc = ifp->if_softc;
   8954 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8955 
   8956 	wm_nq_send_common_locked(ifp, txq, false);
   8957 }
   8958 
   8959 static int
   8960 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8961 {
   8962 	int qid;
   8963 	struct wm_softc *sc = ifp->if_softc;
   8964 	struct wm_txqueue *txq;
   8965 
   8966 	qid = wm_select_txqueue(ifp, m);
   8967 	txq = &sc->sc_queue[qid].wmq_txq;
   8968 
   8969 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8970 		m_freem(m);
   8971 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8972 		return ENOBUFS;
   8973 	}
   8974 
   8975 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8976 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8977 	if (m->m_flags & M_MCAST)
   8978 		if_statinc_ref(nsr, if_omcasts);
   8979 	IF_STAT_PUTREF(ifp);
   8980 
   8981 	/*
    8982 	 * There are two situations in which this mutex_tryenter() can fail
    8983 	 * at run time: (1) contention with the interrupt handler
    8984 	 * (wm_txrxintr_msix()), and (2) contention with the deferred
    8985 	 * if_start softint (wm_handle_queue()).
   8986 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   8987 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   8988 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
   8989 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
   8990 	 * stuck, either.
   8991 	 */
   8992 	if (mutex_tryenter(txq->txq_lock)) {
   8993 		if (!txq->txq_stopping)
   8994 			wm_nq_transmit_locked(ifp, txq);
   8995 		mutex_exit(txq->txq_lock);
   8996 	}
   8997 
   8998 	return 0;
   8999 }
   9000 
   9001 static void
   9002 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   9003 {
   9004 
   9005 	wm_nq_send_common_locked(ifp, txq, true);
   9006 }
   9007 
   9008 static void
   9009 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   9010     bool is_transmit)
   9011 {
   9012 	struct wm_softc *sc = ifp->if_softc;
   9013 	struct mbuf *m0;
   9014 	struct wm_txsoft *txs;
   9015 	bus_dmamap_t dmamap;
   9016 	int error, nexttx, lasttx = -1, seg, segs_needed;
   9017 	bool do_csum, sent;
   9018 	bool remap = true;
   9019 
   9020 	KASSERT(mutex_owned(txq->txq_lock));
   9021 	KASSERT(!txq->txq_stopping);
   9022 
   9023 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   9024 		return;
   9025 
   9026 	if (__predict_false(wm_linkdown_discard(txq))) {
   9027 		do {
   9028 			if (is_transmit)
   9029 				m0 = pcq_get(txq->txq_interq);
   9030 			else
   9031 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   9032 			/*
    9033 			 * Count the packet as successfully sent, as if it
    9034 			 * had been discarded by a link-down PHY.
   9035 			 */
   9036 			if (m0 != NULL) {
   9037 				if_statinc(ifp, if_opackets);
   9038 				m_freem(m0);
   9039 			}
   9040 		} while (m0 != NULL);
   9041 		return;
   9042 	}
   9043 
   9044 	sent = false;
   9045 
   9046 	/*
   9047 	 * Loop through the send queue, setting up transmit descriptors
   9048 	 * until we drain the queue, or use up all available transmit
   9049 	 * descriptors.
   9050 	 */
   9051 	for (;;) {
   9052 		m0 = NULL;
   9053 
   9054 		/* Get a work queue entry. */
   9055 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   9056 			wm_txeof(txq, UINT_MAX);
   9057 			if (txq->txq_sfree == 0) {
   9058 				DPRINTF(sc, WM_DEBUG_TX,
   9059 				    ("%s: TX: no free job descriptors\n",
   9060 					device_xname(sc->sc_dev)));
   9061 				WM_Q_EVCNT_INCR(txq, txsstall);
   9062 				break;
   9063 			}
   9064 		}
   9065 
   9066 		/* Grab a packet off the queue. */
   9067 		if (is_transmit)
   9068 			m0 = pcq_get(txq->txq_interq);
   9069 		else
   9070 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   9071 		if (m0 == NULL)
   9072 			break;
   9073 
   9074 		DPRINTF(sc, WM_DEBUG_TX,
   9075 		    ("%s: TX: have packet to transmit: %p\n",
   9076 		    device_xname(sc->sc_dev), m0));
   9077 
   9078 		txs = &txq->txq_soft[txq->txq_snext];
   9079 		dmamap = txs->txs_dmamap;
   9080 
   9081 		/*
   9082 		 * Load the DMA map.  If this fails, the packet either
   9083 		 * didn't fit in the allotted number of segments, or we
   9084 		 * were short on resources.  For the too-many-segments
   9085 		 * case, we simply report an error and drop the packet,
   9086 		 * since we can't sanely copy a jumbo packet to a single
   9087 		 * buffer.
   9088 		 */
   9089 retry:
   9090 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   9091 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   9092 		if (__predict_false(error)) {
   9093 			if (error == EFBIG) {
   9094 				if (remap == true) {
   9095 					struct mbuf *m;
   9096 
   9097 					remap = false;
   9098 					m = m_defrag(m0, M_NOWAIT);
   9099 					if (m != NULL) {
   9100 						WM_Q_EVCNT_INCR(txq, defrag);
   9101 						m0 = m;
   9102 						goto retry;
   9103 					}
   9104 				}
   9105 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   9106 				log(LOG_ERR, "%s: Tx packet consumes too many "
   9107 				    "DMA segments, dropping...\n",
   9108 				    device_xname(sc->sc_dev));
   9109 				wm_dump_mbuf_chain(sc, m0);
   9110 				m_freem(m0);
   9111 				continue;
   9112 			}
   9113 			/* Short on resources, just stop for now. */
   9114 			DPRINTF(sc, WM_DEBUG_TX,
   9115 			    ("%s: TX: dmamap load failed: %d\n",
   9116 				device_xname(sc->sc_dev), error));
   9117 			break;
   9118 		}
   9119 
   9120 		segs_needed = dmamap->dm_nsegs;
   9121 
   9122 		/*
   9123 		 * Ensure we have enough descriptors free to describe
   9124 		 * the packet. Note, we always reserve one descriptor
   9125 		 * at the end of the ring due to the semantics of the
   9126 		 * TDT register, plus one more in the event we need
   9127 		 * to load offload context.
   9128 		 */
   9129 		if (segs_needed > txq->txq_free - 2) {
   9130 			/*
   9131 			 * Not enough free descriptors to transmit this
   9132 			 * packet.  We haven't committed anything yet,
   9133 			 * so just unload the DMA map, put the packet
    9134 			 * back on the queue, and punt. Notify the upper
   9135 			 * layer that there are no more slots left.
   9136 			 */
   9137 			DPRINTF(sc, WM_DEBUG_TX,
   9138 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   9139 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   9140 				segs_needed, txq->txq_free - 1));
   9141 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   9142 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   9143 			WM_Q_EVCNT_INCR(txq, txdstall);
   9144 			break;
   9145 		}
   9146 
   9147 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   9148 
   9149 		DPRINTF(sc, WM_DEBUG_TX,
   9150 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   9151 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   9152 
   9153 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   9154 
   9155 		/*
   9156 		 * Store a pointer to the packet so that we can free it
   9157 		 * later.
   9158 		 *
   9159 		 * Initially, we consider the number of descriptors the
    9160 		 * packet uses to be the number of DMA segments.  This may be
   9161 		 * incremented by 1 if we do checksum offload (a descriptor
   9162 		 * is used to set the checksum context).
   9163 		 */
   9164 		txs->txs_mbuf = m0;
   9165 		txs->txs_firstdesc = txq->txq_next;
   9166 		txs->txs_ndesc = segs_needed;
   9167 
   9168 		/* Set up offload parameters for this packet. */
   9169 		uint32_t cmdlen, fields, dcmdlen;
   9170 		if (m0->m_pkthdr.csum_flags &
   9171 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   9172 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9173 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   9174 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   9175 			    &do_csum);
   9176 		} else {
   9177 			do_csum = false;
   9178 			cmdlen = 0;
   9179 			fields = 0;
   9180 		}
   9181 
   9182 		/* Sync the DMA map. */
   9183 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   9184 		    BUS_DMASYNC_PREWRITE);
   9185 
   9186 		/* Initialize the first transmit descriptor. */
   9187 		nexttx = txq->txq_next;
   9188 		if (!do_csum) {
   9189 			/* Set up a legacy descriptor */
   9190 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   9191 			    dmamap->dm_segs[0].ds_addr);
   9192 			txq->txq_descs[nexttx].wtx_cmdlen =
   9193 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   9194 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   9195 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   9196 			if (vlan_has_tag(m0)) {
   9197 				txq->txq_descs[nexttx].wtx_cmdlen |=
   9198 				    htole32(WTX_CMD_VLE);
   9199 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   9200 				    htole16(vlan_get_tag(m0));
   9201 			} else
    9202 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   9203 
   9204 			dcmdlen = 0;
   9205 		} else {
   9206 			/* Set up an advanced data descriptor */
   9207 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9208 			    htole64(dmamap->dm_segs[0].ds_addr);
   9209 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
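         			/*
         			 * The segment length occupies the low bits of
         			 * nqtxd_cmdlen; the KASSERT above checks that
         			 * cmdlen carries only high command bits, so the
         			 * OR below cannot corrupt the length.
         			 */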
   9210 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9211 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   9212 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   9213 			    htole32(fields);
   9214 			DPRINTF(sc, WM_DEBUG_TX,
   9215 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   9216 				device_xname(sc->sc_dev), nexttx,
   9217 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   9218 			DPRINTF(sc, WM_DEBUG_TX,
   9219 			    ("\t 0x%08x%08x\n", fields,
   9220 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   9221 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   9222 		}
   9223 
   9224 		lasttx = nexttx;
   9225 		nexttx = WM_NEXTTX(txq, nexttx);
   9226 		/*
    9227 		 * Fill in the remaining descriptors. The legacy and
    9228 		 * advanced formats are identical from here on.
   9229 		 */
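         		/*
         		 * dcmdlen supplies the per-segment descriptor-type bits:
         		 * zero for legacy descriptors, or NQTX_DTYP_D |
         		 * NQTX_CMD_DEXT for advanced ones.
         		 */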
   9230 		for (seg = 1; seg < dmamap->dm_nsegs;
   9231 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   9232 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   9233 			    htole64(dmamap->dm_segs[seg].ds_addr);
   9234 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   9235 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   9236 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   9237 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   9238 			lasttx = nexttx;
   9239 
   9240 			DPRINTF(sc, WM_DEBUG_TX,
   9241 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   9242 				device_xname(sc->sc_dev), nexttx,
   9243 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   9244 				dmamap->dm_segs[seg].ds_len));
   9245 		}
   9246 
   9247 		KASSERT(lasttx != -1);
   9248 
   9249 		/*
   9250 		 * Set up the command byte on the last descriptor of
   9251 		 * the packet. If we're in the interrupt delay window,
   9252 		 * delay the interrupt.
   9253 		 */
   9254 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   9255 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
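         		/*
         		 * The assertion above lets us use the legacy bit names
         		 * for both formats: EOP and RS occupy the same bit
         		 * positions in the legacy and advanced layouts.
         		 */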
   9256 		txq->txq_descs[lasttx].wtx_cmdlen |=
   9257 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   9258 
   9259 		txs->txs_lastdesc = lasttx;
   9260 
   9261 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   9262 		    device_xname(sc->sc_dev),
   9263 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   9264 
   9265 		/* Sync the descriptors we're using. */
   9266 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   9267 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   9268 
   9269 		/* Give the packet to the chip. */
   9270 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   9271 		sent = true;
   9272 
   9273 		DPRINTF(sc, WM_DEBUG_TX,
   9274 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   9275 
   9276 		DPRINTF(sc, WM_DEBUG_TX,
   9277 		    ("%s: TX: finished transmitting packet, job %d\n",
   9278 			device_xname(sc->sc_dev), txq->txq_snext));
   9279 
   9280 		/* Advance the tx pointer. */
   9281 		txq->txq_free -= txs->txs_ndesc;
   9282 		txq->txq_next = nexttx;
   9283 
   9284 		txq->txq_sfree--;
   9285 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   9286 
   9287 		/* Pass the packet to any BPF listeners. */
   9288 		bpf_mtap(ifp, m0, BPF_D_OUT);
   9289 	}
   9290 
   9291 	if (m0 != NULL) {
   9292 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9293 		WM_Q_EVCNT_INCR(txq, descdrop);
   9294 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   9295 			__func__));
   9296 		m_freem(m0);
   9297 	}
   9298 
   9299 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   9300 		/* No more slots; notify upper layer. */
   9301 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   9302 	}
   9303 
   9304 	if (sent) {
   9305 		/* Set a watchdog timer in case the chip flakes out. */
   9306 		txq->txq_lastsent = time_uptime;
   9307 		txq->txq_sending = true;
   9308 	}
   9309 }
   9310 
   9311 static void
   9312 wm_deferred_start_locked(struct wm_txqueue *txq)
   9313 {
   9314 	struct wm_softc *sc = txq->txq_sc;
   9315 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9316 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   9317 	int qid = wmq->wmq_id;
   9318 
   9319 	KASSERT(mutex_owned(txq->txq_lock));
   9320 	KASSERT(!txq->txq_stopping);
   9321 
   9322 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    9323 		/* XXX needed for ALTQ or single-CPU systems */
   9324 		if (qid == 0)
   9325 			wm_nq_start_locked(ifp);
   9326 		wm_nq_transmit_locked(ifp, txq);
   9327 	} else {
    9328 		/* XXX needed for ALTQ or single-CPU systems */
   9329 		if (qid == 0)
   9330 			wm_start_locked(ifp);
   9331 		wm_transmit_locked(ifp, txq);
   9332 	}
   9333 }
   9334 
   9335 /* Interrupt */
   9336 
   9337 /*
   9338  * wm_txeof:
   9339  *
   9340  *	Helper; handle transmit interrupts.
   9341  */
   9342 static bool
   9343 wm_txeof(struct wm_txqueue *txq, u_int limit)
   9344 {
   9345 	struct wm_softc *sc = txq->txq_sc;
   9346 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9347 	struct wm_txsoft *txs;
   9348 	int count = 0;
   9349 	int i;
   9350 	uint8_t status;
   9351 	bool more = false;
   9352 
   9353 	KASSERT(mutex_owned(txq->txq_lock));
   9354 
   9355 	if (txq->txq_stopping)
   9356 		return false;
   9357 
   9358 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   9359 
   9360 	/*
   9361 	 * Go through the Tx list and free mbufs for those
   9362 	 * frames which have been transmitted.
   9363 	 */
   9364 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   9365 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   9366 		txs = &txq->txq_soft[i];
   9367 
   9368 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   9369 			device_xname(sc->sc_dev), i));
   9370 
   9371 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   9372 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9373 
   9374 		status =
   9375 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
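         		/*
         		 * If the descriptor-done (DD) bit is not set, the chip
         		 * still owns this job, so re-sync the descriptor for a
         		 * later read and stop scanning.
         		 */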
   9376 		if ((status & WTX_ST_DD) == 0) {
   9377 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   9378 			    BUS_DMASYNC_PREREAD);
   9379 			break;
   9380 		}
   9381 
   9382 		if (limit-- == 0) {
   9383 			more = true;
   9384 			DPRINTF(sc, WM_DEBUG_TX,
   9385 			    ("%s: TX: loop limited, job %d is not processed\n",
   9386 				device_xname(sc->sc_dev), i));
   9387 			break;
   9388 		}
   9389 
   9390 		count++;
   9391 		DPRINTF(sc, WM_DEBUG_TX,
   9392 		    ("%s: TX: job %d done: descs %d..%d\n",
   9393 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9394 		    txs->txs_lastdesc));
   9395 
   9396 		/*
   9397 		 * XXX We should probably be using the statistics
   9398 		 * XXX registers, but I don't know if they exist
   9399 		 * XXX on chips before the i82544.
   9400 		 */
   9401 
   9402 #ifdef WM_EVENT_COUNTERS
   9403 		if (status & WTX_ST_TU)
   9404 			WM_Q_EVCNT_INCR(txq, underrun);
   9405 #endif /* WM_EVENT_COUNTERS */
   9406 
   9407 		/*
    9408 		 * Documents for the 82574 and newer say the status field
    9409 		 * has neither the EC (Excessive Collision) bit nor the LC
    9410 		 * (Late Collision) bit; both are reserved. See the "PCIe GbE
    9411 		 * Controller Open Source Software Developer's Manual" and
    9412 		 * the 82574 and newer datasheets.
    9413 		 *
    9414 		 * XXX I saw the LC bit set on an I218 even on full-duplex
    9415 		 * media; the bit might mean something else (I have no docs).
   9416 		 */
   9417 
   9418 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9419 		    && ((sc->sc_type < WM_T_82574)
   9420 			|| (sc->sc_type == WM_T_80003))) {
   9421 			if_statinc(ifp, if_oerrors);
   9422 			if (status & WTX_ST_LC)
   9423 				log(LOG_WARNING, "%s: late collision\n",
   9424 				    device_xname(sc->sc_dev));
   9425 			else if (status & WTX_ST_EC) {
   9426 				if_statadd(ifp, if_collisions,
   9427 				    TX_COLLISION_THRESHOLD + 1);
   9428 				log(LOG_WARNING, "%s: excessive collisions\n",
   9429 				    device_xname(sc->sc_dev));
   9430 			}
   9431 		} else
   9432 			if_statinc(ifp, if_opackets);
   9433 
   9434 		txq->txq_packets++;
   9435 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9436 
   9437 		txq->txq_free += txs->txs_ndesc;
   9438 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9439 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9440 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9441 		m_freem(txs->txs_mbuf);
   9442 		txs->txs_mbuf = NULL;
   9443 	}
   9444 
   9445 	/* Update the dirty transmit buffer pointer. */
   9446 	txq->txq_sdirty = i;
   9447 	DPRINTF(sc, WM_DEBUG_TX,
   9448 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9449 
   9450 	if (count != 0)
   9451 		rnd_add_uint32(&sc->rnd_source, count);
   9452 
   9453 	/*
   9454 	 * If there are no more pending transmissions, cancel the watchdog
   9455 	 * timer.
   9456 	 */
   9457 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9458 		txq->txq_sending = false;
   9459 
   9460 	return more;
   9461 }
   9462 
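         /*
          * Rx descriptor accessors. Three layouts are hidden behind these:
          * the 82574 uses extended descriptors, WM_F_NEWQUEUE chips use the
          * "advanced" (nq) format, and older chips use the legacy format.
          */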
   9463 static inline uint32_t
   9464 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9465 {
   9466 	struct wm_softc *sc = rxq->rxq_sc;
   9467 
   9468 	if (sc->sc_type == WM_T_82574)
   9469 		return EXTRXC_STATUS(
   9470 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9471 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9472 		return NQRXC_STATUS(
   9473 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9474 	else
   9475 		return rxq->rxq_descs[idx].wrx_status;
   9476 }
   9477 
   9478 static inline uint32_t
   9479 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9480 {
   9481 	struct wm_softc *sc = rxq->rxq_sc;
   9482 
   9483 	if (sc->sc_type == WM_T_82574)
   9484 		return EXTRXC_ERROR(
   9485 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9486 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9487 		return NQRXC_ERROR(
   9488 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9489 	else
   9490 		return rxq->rxq_descs[idx].wrx_errors;
   9491 }
   9492 
   9493 static inline uint16_t
   9494 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9495 {
   9496 	struct wm_softc *sc = rxq->rxq_sc;
   9497 
   9498 	if (sc->sc_type == WM_T_82574)
   9499 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9500 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9501 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9502 	else
   9503 		return rxq->rxq_descs[idx].wrx_special;
   9504 }
   9505 
   9506 static inline int
   9507 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9508 {
   9509 	struct wm_softc *sc = rxq->rxq_sc;
   9510 
   9511 	if (sc->sc_type == WM_T_82574)
   9512 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9513 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9514 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9515 	else
   9516 		return rxq->rxq_descs[idx].wrx_len;
   9517 }
   9518 
   9519 #ifdef WM_DEBUG
   9520 static inline uint32_t
   9521 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9522 {
   9523 	struct wm_softc *sc = rxq->rxq_sc;
   9524 
   9525 	if (sc->sc_type == WM_T_82574)
   9526 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9527 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9528 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9529 	else
   9530 		return 0;
   9531 }
   9532 
   9533 static inline uint8_t
   9534 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9535 {
   9536 	struct wm_softc *sc = rxq->rxq_sc;
   9537 
   9538 	if (sc->sc_type == WM_T_82574)
   9539 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9540 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9541 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9542 	else
   9543 		return 0;
   9544 }
   9545 #endif /* WM_DEBUG */
   9546 
   9547 static inline bool
   9548 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9549     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9550 {
   9551 
   9552 	if (sc->sc_type == WM_T_82574)
   9553 		return (status & ext_bit) != 0;
   9554 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9555 		return (status & nq_bit) != 0;
   9556 	else
   9557 		return (status & legacy_bit) != 0;
   9558 }
   9559 
   9560 static inline bool
   9561 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9562     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9563 {
   9564 
   9565 	if (sc->sc_type == WM_T_82574)
   9566 		return (error & ext_bit) != 0;
   9567 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9568 		return (error & nq_bit) != 0;
   9569 	else
   9570 		return (error & legacy_bit) != 0;
   9571 }
   9572 
   9573 static inline bool
   9574 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9575 {
   9576 
   9577 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9578 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9579 		return true;
   9580 	else
   9581 		return false;
   9582 }
   9583 
   9584 static inline bool
   9585 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9586 {
   9587 	struct wm_softc *sc = rxq->rxq_sc;
   9588 
   9589 	/* XXX missing error bit for newqueue? */
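         	/*
         	 * Note that an nq_bit of 0 below means the check can never
         	 * match on WM_F_NEWQUEUE chips; only NQRXC_ERROR_RXE is
         	 * reported for them.
         	 */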
   9590 	if (wm_rxdesc_is_set_error(sc, errors,
   9591 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9592 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9593 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9594 		NQRXC_ERROR_RXE)) {
   9595 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9596 		    EXTRXC_ERROR_SE, 0))
   9597 			log(LOG_WARNING, "%s: symbol error\n",
   9598 			    device_xname(sc->sc_dev));
   9599 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9600 		    EXTRXC_ERROR_SEQ, 0))
   9601 			log(LOG_WARNING, "%s: receive sequence error\n",
   9602 			    device_xname(sc->sc_dev));
   9603 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9604 		    EXTRXC_ERROR_CE, 0))
   9605 			log(LOG_WARNING, "%s: CRC error\n",
   9606 			    device_xname(sc->sc_dev));
   9607 		return true;
   9608 	}
   9609 
   9610 	return false;
   9611 }
   9612 
   9613 static inline bool
   9614 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9615 {
   9616 	struct wm_softc *sc = rxq->rxq_sc;
   9617 
   9618 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9619 		NQRXC_STATUS_DD)) {
   9620 		/* We have processed all of the receive descriptors. */
   9621 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9622 		return false;
   9623 	}
   9624 
   9625 	return true;
   9626 }
   9627 
   9628 static inline bool
   9629 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9630     uint16_t vlantag, struct mbuf *m)
   9631 {
   9632 
   9633 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9634 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9635 		vlan_set_tag(m, le16toh(vlantag));
   9636 	}
   9637 
   9638 	return true;
   9639 }
   9640 
   9641 static inline void
   9642 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9643     uint32_t errors, struct mbuf *m)
   9644 {
   9645 	struct wm_softc *sc = rxq->rxq_sc;
   9646 
   9647 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9648 		if (wm_rxdesc_is_set_status(sc, status,
   9649 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9650 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9651 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9652 			if (wm_rxdesc_is_set_error(sc, errors,
   9653 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9654 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9655 		}
   9656 		if (wm_rxdesc_is_set_status(sc, status,
   9657 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9658 			/*
   9659 			 * Note: we don't know if this was TCP or UDP,
   9660 			 * so we just set both bits, and expect the
   9661 			 * upper layers to deal.
   9662 			 */
   9663 			WM_Q_EVCNT_INCR(rxq, tusum);
   9664 			m->m_pkthdr.csum_flags |=
   9665 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9666 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9667 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9668 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9669 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9670 		}
   9671 	}
   9672 }
   9673 
   9674 /*
   9675  * wm_rxeof:
   9676  *
   9677  *	Helper; handle receive interrupts.
   9678  */
   9679 static bool
   9680 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9681 {
   9682 	struct wm_softc *sc = rxq->rxq_sc;
   9683 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9684 	struct wm_rxsoft *rxs;
   9685 	struct mbuf *m;
   9686 	int i, len;
   9687 	int count = 0;
   9688 	uint32_t status, errors;
   9689 	uint16_t vlantag;
   9690 	bool more = false;
   9691 
   9692 	KASSERT(mutex_owned(rxq->rxq_lock));
   9693 
   9694 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9695 		rxs = &rxq->rxq_soft[i];
   9696 
   9697 		DPRINTF(sc, WM_DEBUG_RX,
   9698 		    ("%s: RX: checking descriptor %d\n",
   9699 			device_xname(sc->sc_dev), i));
   9700 		wm_cdrxsync(rxq, i,
   9701 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9702 
   9703 		status = wm_rxdesc_get_status(rxq, i);
   9704 		errors = wm_rxdesc_get_errors(rxq, i);
   9705 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9706 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9707 #ifdef WM_DEBUG
   9708 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9709 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9710 #endif
   9711 
   9712 		if (!wm_rxdesc_dd(rxq, i, status))
   9713 			break;
   9714 
   9715 		if (limit-- == 0) {
   9716 			more = true;
   9717 			DPRINTF(sc, WM_DEBUG_RX,
   9718 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9719 				device_xname(sc->sc_dev), i));
   9720 			break;
   9721 		}
   9722 
   9723 		count++;
   9724 		if (__predict_false(rxq->rxq_discard)) {
   9725 			DPRINTF(sc, WM_DEBUG_RX,
   9726 			    ("%s: RX: discarding contents of descriptor %d\n",
   9727 				device_xname(sc->sc_dev), i));
   9728 			wm_init_rxdesc(rxq, i);
   9729 			if (wm_rxdesc_is_eop(rxq, status)) {
   9730 				/* Reset our state. */
   9731 				DPRINTF(sc, WM_DEBUG_RX,
   9732 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9733 					device_xname(sc->sc_dev)));
   9734 				rxq->rxq_discard = 0;
   9735 			}
   9736 			continue;
   9737 		}
   9738 
   9739 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9740 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9741 
   9742 		m = rxs->rxs_mbuf;
   9743 
   9744 		/*
   9745 		 * Add a new receive buffer to the ring, unless of
   9746 		 * course the length is zero. Treat the latter as a
   9747 		 * failed mapping.
   9748 		 */
   9749 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9750 			/*
   9751 			 * Failed, throw away what we've done so
   9752 			 * far, and discard the rest of the packet.
   9753 			 */
   9754 			if_statinc(ifp, if_ierrors);
   9755 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9756 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9757 			wm_init_rxdesc(rxq, i);
   9758 			if (!wm_rxdesc_is_eop(rxq, status))
   9759 				rxq->rxq_discard = 1;
   9760 			if (rxq->rxq_head != NULL)
   9761 				m_freem(rxq->rxq_head);
   9762 			WM_RXCHAIN_RESET(rxq);
   9763 			DPRINTF(sc, WM_DEBUG_RX,
   9764 			    ("%s: RX: Rx buffer allocation failed, "
   9765 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9766 				rxq->rxq_discard ? " (discard)" : ""));
   9767 			continue;
   9768 		}
   9769 
   9770 		m->m_len = len;
   9771 		rxq->rxq_len += len;
   9772 		DPRINTF(sc, WM_DEBUG_RX,
   9773 		    ("%s: RX: buffer at %p len %d\n",
   9774 			device_xname(sc->sc_dev), m->m_data, len));
   9775 
   9776 		/* If this is not the end of the packet, keep looking. */
   9777 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9778 			WM_RXCHAIN_LINK(rxq, m);
   9779 			DPRINTF(sc, WM_DEBUG_RX,
   9780 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9781 				device_xname(sc->sc_dev), rxq->rxq_len));
   9782 			continue;
   9783 		}
   9784 
   9785 		/*
    9786 		 * Okay, we have the entire packet now. The chip is configured
    9787 		 * to include the FCS (not all chips can be configured to strip
    9788 		 * it), so we normally have to trim it. Exceptions: the I35[04]
    9789 		 * and I21[01] have an erratum whereby the RCTL_SECRC bit in
    9790 		 * the RCTL register is always set, so we don't trim it on them.
    9791 		 * PCH2 and newer chips also do not include the FCS when jumbo
    9792 		 * frames are used, to work around an erratum. We may need to
    9793 		 * adjust the length of the previous mbuf in the chain if the
    9794 		 * current mbuf is too short.
   9795 		 */
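         		/*
         		 * E.g. if the final mbuf holds only two bytes, both are
         		 * FCS bytes; the other two FCS bytes sit at the tail of
         		 * the previous mbuf, whose m_len is reduced accordingly
         		 * below.
         		 */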
   9796 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9797 			if (m->m_len < ETHER_CRC_LEN) {
   9798 				rxq->rxq_tail->m_len
   9799 				    -= (ETHER_CRC_LEN - m->m_len);
   9800 				m->m_len = 0;
   9801 			} else
   9802 				m->m_len -= ETHER_CRC_LEN;
   9803 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9804 		} else
   9805 			len = rxq->rxq_len;
   9806 
   9807 		WM_RXCHAIN_LINK(rxq, m);
   9808 
   9809 		*rxq->rxq_tailp = NULL;
   9810 		m = rxq->rxq_head;
   9811 
   9812 		WM_RXCHAIN_RESET(rxq);
   9813 
   9814 		DPRINTF(sc, WM_DEBUG_RX,
   9815 		    ("%s: RX: have entire packet, len -> %d\n",
   9816 			device_xname(sc->sc_dev), len));
   9817 
   9818 		/* If an error occurred, update stats and drop the packet. */
   9819 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9820 			m_freem(m);
   9821 			continue;
   9822 		}
   9823 
   9824 		/* No errors.  Receive the packet. */
   9825 		m_set_rcvif(m, ifp);
   9826 		m->m_pkthdr.len = len;
   9827 		/*
    9828 		 * TODO:
    9829 		 * We should save the rsshash and rsstype in this mbuf.
   9830 		 */
   9831 		DPRINTF(sc, WM_DEBUG_RX,
   9832 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9833 			device_xname(sc->sc_dev), rsstype, rsshash));
   9834 
   9835 		/*
   9836 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9837 		 * for us.  Associate the tag with the packet.
   9838 		 */
   9839 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9840 			continue;
   9841 
   9842 		/* Set up checksum info for this packet. */
   9843 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9844 
   9845 		rxq->rxq_packets++;
   9846 		rxq->rxq_bytes += len;
   9847 		/* Pass it on. */
   9848 		if_percpuq_enqueue(sc->sc_ipq, m);
   9849 
   9850 		if (rxq->rxq_stopping)
   9851 			break;
   9852 	}
   9853 	rxq->rxq_ptr = i;
   9854 
   9855 	if (count != 0)
   9856 		rnd_add_uint32(&sc->rnd_source, count);
   9857 
   9858 	DPRINTF(sc, WM_DEBUG_RX,
   9859 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9860 
   9861 	return more;
   9862 }
   9863 
   9864 /*
   9865  * wm_linkintr_gmii:
   9866  *
   9867  *	Helper; handle link interrupts for GMII.
   9868  */
   9869 static void
   9870 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9871 {
   9872 	device_t dev = sc->sc_dev;
   9873 	uint32_t status, reg;
   9874 	bool link;
   9875 	int rv;
   9876 
   9877 	KASSERT(WM_CORE_LOCKED(sc));
   9878 
   9879 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9880 		__func__));
   9881 
   9882 	if ((icr & ICR_LSC) == 0) {
   9883 		if (icr & ICR_RXSEQ)
   9884 			DPRINTF(sc, WM_DEBUG_LINK,
    9885 			    ("%s: LINK: Receive sequence error\n",
   9886 				device_xname(dev)));
   9887 		return;
   9888 	}
   9889 
   9890 	/* Link status changed */
   9891 	status = CSR_READ(sc, WMREG_STATUS);
   9892 	link = status & STATUS_LU;
   9893 	if (link) {
   9894 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9895 			device_xname(dev),
   9896 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9897 		if (wm_phy_need_linkdown_discard(sc)) {
   9898 			DPRINTF(sc, WM_DEBUG_LINK,
   9899 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9900 				device_xname(dev)));
   9901 			wm_clear_linkdown_discard(sc);
   9902 		}
   9903 	} else {
   9904 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9905 			device_xname(dev)));
   9906 		if (wm_phy_need_linkdown_discard(sc)) {
   9907 			DPRINTF(sc, WM_DEBUG_LINK,
   9908 			    ("%s: linkintr: Set linkdown discard flag\n",
   9909 				device_xname(dev)));
   9910 			wm_set_linkdown_discard(sc);
   9911 		}
   9912 	}
   9913 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9914 		wm_gig_downshift_workaround_ich8lan(sc);
   9915 
   9916 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
   9917 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9918 
   9919 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9920 		device_xname(dev)));
   9921 	mii_pollstat(&sc->sc_mii);
   9922 	if (sc->sc_type == WM_T_82543) {
   9923 		int miistatus, active;
   9924 
   9925 		/*
   9926 		 * With 82543, we need to force speed and
   9927 		 * duplex on the MAC equal to what the PHY
   9928 		 * speed and duplex configuration is.
   9929 		 */
   9930 		miistatus = sc->sc_mii.mii_media_status;
   9931 
   9932 		if (miistatus & IFM_ACTIVE) {
   9933 			active = sc->sc_mii.mii_media_active;
   9934 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9935 			switch (IFM_SUBTYPE(active)) {
   9936 			case IFM_10_T:
   9937 				sc->sc_ctrl |= CTRL_SPEED_10;
   9938 				break;
   9939 			case IFM_100_TX:
   9940 				sc->sc_ctrl |= CTRL_SPEED_100;
   9941 				break;
   9942 			case IFM_1000_T:
   9943 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9944 				break;
   9945 			default:
   9946 				/*
   9947 				 * Fiber?
    9948 				 * Should not enter here.
   9949 				 */
   9950 				device_printf(dev, "unknown media (%x)\n",
   9951 				    active);
   9952 				break;
   9953 			}
   9954 			if (active & IFM_FDX)
   9955 				sc->sc_ctrl |= CTRL_FD;
   9956 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9957 		}
   9958 	} else if (sc->sc_type == WM_T_PCH) {
   9959 		wm_k1_gig_workaround_hv(sc,
   9960 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9961 	}
   9962 
   9963 	/*
   9964 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9965 	 * aggressive resulting in many collisions. To avoid this, increase
   9966 	 * the IPG and reduce Rx latency in the PHY.
   9967 	 */
   9968 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9969 	    && link) {
   9970 		uint32_t tipg_reg;
   9971 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9972 		bool fdx;
   9973 		uint16_t emi_addr, emi_val;
   9974 
   9975 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9976 		tipg_reg &= ~TIPG_IPGT_MASK;
   9977 		fdx = status & STATUS_FD;
   9978 
   9979 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9980 			tipg_reg |= 0xff;
   9981 			/* Reduce Rx latency in analog PHY */
   9982 			emi_val = 0;
   9983 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9984 		    fdx && speed != STATUS_SPEED_1000) {
   9985 			tipg_reg |= 0xc;
   9986 			emi_val = 1;
   9987 		} else {
   9988 			/* Roll back the default values */
   9989 			tipg_reg |= 0x08;
   9990 			emi_val = 1;
   9991 		}
   9992 
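         		/*
         		 * At this point the IPGT field is 0xff (10 Mbps
         		 * half-duplex), 0xc (PCH_SPT and newer, full-duplex
         		 * below 1G) or the default 0x08.
         		 */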
   9993 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9994 
   9995 		rv = sc->phy.acquire(sc);
   9996 		if (rv)
   9997 			return;
   9998 
   9999 		if (sc->sc_type == WM_T_PCH2)
   10000 			emi_addr = I82579_RX_CONFIG;
   10001 		else
   10002 			emi_addr = I217_RX_CONFIG;
   10003 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   10004 
   10005 		if (sc->sc_type >= WM_T_PCH_LPT) {
   10006 			uint16_t phy_reg;
   10007 
   10008 			sc->phy.readreg_locked(dev, 2,
   10009 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   10010 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   10011 			if (speed == STATUS_SPEED_100
   10012 			    || speed == STATUS_SPEED_10)
   10013 				phy_reg |= 0x3e8;
   10014 			else
   10015 				phy_reg |= 0xfa;
   10016 			sc->phy.writereg_locked(dev, 2,
   10017 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   10018 
   10019 			if (speed == STATUS_SPEED_1000) {
   10020 				sc->phy.readreg_locked(dev, 2,
   10021 				    HV_PM_CTRL, &phy_reg);
   10022 
   10023 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   10024 
   10025 				sc->phy.writereg_locked(dev, 2,
   10026 				    HV_PM_CTRL, phy_reg);
   10027 			}
   10028 		}
   10029 		sc->phy.release(sc);
   10030 
   10031 		if (rv)
   10032 			return;
   10033 
   10034 		if (sc->sc_type >= WM_T_PCH_SPT) {
   10035 			uint16_t data, ptr_gap;
   10036 
   10037 			if (speed == STATUS_SPEED_1000) {
   10038 				rv = sc->phy.acquire(sc);
   10039 				if (rv)
   10040 					return;
   10041 
   10042 				rv = sc->phy.readreg_locked(dev, 2,
   10043 				    I82579_UNKNOWN1, &data);
   10044 				if (rv) {
   10045 					sc->phy.release(sc);
   10046 					return;
   10047 				}
   10048 
   10049 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   10050 				if (ptr_gap < 0x18) {
   10051 					data &= ~(0x3ff << 2);
   10052 					data |= (0x18 << 2);
   10053 					rv = sc->phy.writereg_locked(dev,
   10054 					    2, I82579_UNKNOWN1, data);
   10055 				}
   10056 				sc->phy.release(sc);
   10057 				if (rv)
   10058 					return;
   10059 			} else {
   10060 				rv = sc->phy.acquire(sc);
   10061 				if (rv)
   10062 					return;
   10063 
   10064 				rv = sc->phy.writereg_locked(dev, 2,
   10065 				    I82579_UNKNOWN1, 0xc023);
   10066 				sc->phy.release(sc);
   10067 				if (rv)
   10068 					return;
   10069 
   10070 			}
   10071 		}
   10072 	}
   10073 
   10074 	/*
   10075 	 * I217 Packet Loss issue:
    10076 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    10077 	 * on power up.
    10078 	 * Set the Beacon Duration for I217 to 8 usec.
   10079 	 */
   10080 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10081 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   10082 		reg &= ~FEXTNVM4_BEACON_DURATION;
   10083 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   10084 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   10085 	}
   10086 
   10087 	/* Work-around I218 hang issue */
   10088 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   10089 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   10090 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   10091 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   10092 		wm_k1_workaround_lpt_lp(sc, link);
   10093 
   10094 	if (sc->sc_type >= WM_T_PCH_LPT) {
   10095 		/*
   10096 		 * Set platform power management values for Latency
   10097 		 * Tolerance Reporting (LTR)
   10098 		 */
   10099 		wm_platform_pm_pch_lpt(sc,
   10100 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   10101 	}
   10102 
   10103 	/* Clear link partner's EEE ability */
   10104 	sc->eee_lp_ability = 0;
   10105 
   10106 	/* FEXTNVM6 K1-off workaround */
   10107 	if (sc->sc_type == WM_T_PCH_SPT) {
   10108 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   10109 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   10110 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   10111 		else
   10112 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   10113 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   10114 	}
   10115 
   10116 	if (!link)
   10117 		return;
   10118 
   10119 	switch (sc->sc_type) {
   10120 	case WM_T_PCH2:
   10121 		wm_k1_workaround_lv(sc);
   10122 		/* FALLTHROUGH */
   10123 	case WM_T_PCH:
   10124 		if (sc->sc_phytype == WMPHY_82578)
   10125 			wm_link_stall_workaround_hv(sc);
   10126 		break;
   10127 	default:
   10128 		break;
   10129 	}
   10130 
   10131 	/* Enable/Disable EEE after link up */
   10132 	if (sc->sc_phytype > WMPHY_82579)
   10133 		wm_set_eee_pchlan(sc);
   10134 }
   10135 
   10136 /*
   10137  * wm_linkintr_tbi:
   10138  *
   10139  *	Helper; handle link interrupts for TBI mode.
   10140  */
   10141 static void
   10142 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   10143 {
   10144 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10145 	uint32_t status;
   10146 
   10147 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10148 		__func__));
   10149 
   10150 	status = CSR_READ(sc, WMREG_STATUS);
   10151 	if (icr & ICR_LSC) {
   10152 		wm_check_for_link(sc);
   10153 		if (status & STATUS_LU) {
   10154 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   10155 				device_xname(sc->sc_dev),
   10156 				(status & STATUS_FD) ? "FDX" : "HDX"));
   10157 			/*
   10158 			 * NOTE: CTRL will update TFCE and RFCE automatically,
    10159 			 * so we should update sc->sc_ctrl.
   10160 			 */
   10161 
   10162 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10163 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10164 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10165 			if (status & STATUS_FD)
   10166 				sc->sc_tctl |=
   10167 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10168 			else
   10169 				sc->sc_tctl |=
   10170 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10171 			if (sc->sc_ctrl & CTRL_TFCE)
   10172 				sc->sc_fcrtl |= FCRTL_XONE;
   10173 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10174 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10175 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   10176 			sc->sc_tbi_linkup = 1;
   10177 			if_link_state_change(ifp, LINK_STATE_UP);
   10178 		} else {
   10179 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10180 				device_xname(sc->sc_dev)));
   10181 			sc->sc_tbi_linkup = 0;
   10182 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10183 		}
   10184 		/* Update LED */
   10185 		wm_tbi_serdes_set_linkled(sc);
   10186 	} else if (icr & ICR_RXSEQ)
   10187 		DPRINTF(sc, WM_DEBUG_LINK,
   10188 		    ("%s: LINK: Receive sequence error\n",
   10189 			device_xname(sc->sc_dev)));
   10190 }
   10191 
   10192 /*
   10193  * wm_linkintr_serdes:
   10194  *
    10195  *	Helper; handle link interrupts for SERDES mode.
   10196  */
   10197 static void
   10198 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   10199 {
   10200 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10201 	struct mii_data *mii = &sc->sc_mii;
   10202 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10203 	uint32_t pcs_adv, pcs_lpab, reg;
   10204 
   10205 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   10206 		__func__));
   10207 
   10208 	if (icr & ICR_LSC) {
   10209 		/* Check PCS */
   10210 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10211 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   10212 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   10213 				device_xname(sc->sc_dev)));
   10214 			mii->mii_media_status |= IFM_ACTIVE;
   10215 			sc->sc_tbi_linkup = 1;
   10216 			if_link_state_change(ifp, LINK_STATE_UP);
   10217 		} else {
   10218 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   10219 				device_xname(sc->sc_dev)));
   10220 			mii->mii_media_status |= IFM_NONE;
   10221 			sc->sc_tbi_linkup = 0;
   10222 			if_link_state_change(ifp, LINK_STATE_DOWN);
   10223 			wm_tbi_serdes_set_linkled(sc);
   10224 			return;
   10225 		}
   10226 		mii->mii_media_active |= IFM_1000_SX;
   10227 		if ((reg & PCS_LSTS_FDX) != 0)
   10228 			mii->mii_media_active |= IFM_FDX;
   10229 		else
   10230 			mii->mii_media_active |= IFM_HDX;
   10231 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10232 			/* Check flow */
   10233 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10234 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10235 				DPRINTF(sc, WM_DEBUG_LINK,
   10236 				    ("XXX LINKOK but not ACOMP\n"));
   10237 				return;
   10238 			}
   10239 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10240 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10241 			DPRINTF(sc, WM_DEBUG_LINK,
   10242 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   10243 			if ((pcs_adv & TXCW_SYM_PAUSE)
   10244 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10245 				mii->mii_media_active |= IFM_FLOW
   10246 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10247 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10248 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10249 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   10250 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10251 				mii->mii_media_active |= IFM_FLOW
   10252 				    | IFM_ETH_TXPAUSE;
   10253 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   10254 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   10255 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10256 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   10257 				mii->mii_media_active |= IFM_FLOW
   10258 				    | IFM_ETH_RXPAUSE;
   10259 		}
   10260 		/* Update LED */
   10261 		wm_tbi_serdes_set_linkled(sc);
   10262 	} else
   10263 		DPRINTF(sc, WM_DEBUG_LINK,
   10264 		    ("%s: LINK: Receive sequence error\n",
   10265 		    device_xname(sc->sc_dev)));
   10266 }
   10267 
   10268 /*
   10269  * wm_linkintr:
   10270  *
   10271  *	Helper; handle link interrupts.
   10272  */
   10273 static void
   10274 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   10275 {
   10276 
   10277 	KASSERT(WM_CORE_LOCKED(sc));
   10278 
   10279 	if (sc->sc_flags & WM_F_HAS_MII)
   10280 		wm_linkintr_gmii(sc, icr);
   10281 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10282 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   10283 		wm_linkintr_serdes(sc, icr);
   10284 	else
   10285 		wm_linkintr_tbi(sc, icr);
   10286 }
   10287 
   10288 
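         /*
          * Defer the rest of the Tx/Rx processing: run wm_handle_queue()
          * either on the per-device workqueue (via wm_handle_queue_work())
          * or as a softint, depending on the wmq_txrx_use_workqueue flag.
          */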
   10289 static inline void
   10290 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   10291 {
   10292 
   10293 	if (wmq->wmq_txrx_use_workqueue)
   10294 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   10295 	else
   10296 		softint_schedule(wmq->wmq_si);
   10297 }
   10298 
   10299 static inline void
   10300 wm_legacy_intr_disable(struct wm_softc *sc)
   10301 {
   10302 
   10303 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   10304 }
   10305 
   10306 static inline void
   10307 wm_legacy_intr_enable(struct wm_softc *sc)
   10308 {
   10309 
   10310 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   10311 }
   10312 
   10313 /*
   10314  * wm_intr_legacy:
   10315  *
   10316  *	Interrupt service routine for INTx and MSI.
   10317  */
   10318 static int
   10319 wm_intr_legacy(void *arg)
   10320 {
   10321 	struct wm_softc *sc = arg;
   10322 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10323 	struct wm_queue *wmq = &sc->sc_queue[0];
   10324 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10325 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10326 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10327 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10328 	uint32_t icr, rndval = 0;
   10329 	bool more = false;
   10330 
   10331 	icr = CSR_READ(sc, WMREG_ICR);
   10332 	if ((icr & sc->sc_icr) == 0)
   10333 		return 0;
   10334 
   10335 	DPRINTF(sc, WM_DEBUG_TX,
    10336 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   10337 	if (rndval == 0)
   10338 		rndval = icr;
   10339 
   10340 	mutex_enter(txq->txq_lock);
   10341 
   10342 	if (txq->txq_stopping) {
   10343 		mutex_exit(txq->txq_lock);
   10344 		return 1;
   10345 	}
   10346 
   10347 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10348 	if (icr & ICR_TXDW) {
   10349 		DPRINTF(sc, WM_DEBUG_TX,
   10350 		    ("%s: TX: got TXDW interrupt\n",
   10351 			device_xname(sc->sc_dev)));
   10352 		WM_Q_EVCNT_INCR(txq, txdw);
   10353 	}
   10354 #endif
   10355 	if (txlimit > 0) {
   10356 		more |= wm_txeof(txq, txlimit);
   10357 		if (!IF_IS_EMPTY(&ifp->if_snd))
   10358 			more = true;
   10359 	} else
   10360 		more = true;
   10361 	mutex_exit(txq->txq_lock);
   10362 
   10363 	mutex_enter(rxq->rxq_lock);
   10364 
   10365 	if (rxq->rxq_stopping) {
   10366 		mutex_exit(rxq->rxq_lock);
   10367 		return 1;
   10368 	}
   10369 
   10370 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   10371 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   10372 		DPRINTF(sc, WM_DEBUG_RX,
   10373 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   10374 			device_xname(sc->sc_dev),
   10375 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   10376 		WM_Q_EVCNT_INCR(rxq, intr);
   10377 	}
   10378 #endif
   10379 	if (rxlimit > 0) {
   10380 		/*
   10381 		 * wm_rxeof() does *not* call upper layer functions directly,
    10382 		 * as if_percpuq_enqueue() just calls softint_schedule().
   10383 		 * So, we can call wm_rxeof() in interrupt context.
   10384 		 */
    10385 		more |= wm_rxeof(rxq, rxlimit);
   10386 	} else
   10387 		more = true;
   10388 
   10389 	mutex_exit(rxq->rxq_lock);
   10390 
   10391 	WM_CORE_LOCK(sc);
   10392 
   10393 	if (sc->sc_core_stopping) {
   10394 		WM_CORE_UNLOCK(sc);
   10395 		return 1;
   10396 	}
   10397 
   10398 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10399 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10400 		wm_linkintr(sc, icr);
   10401 	}
   10402 	if ((icr & ICR_GPI(0)) != 0)
   10403 		device_printf(sc->sc_dev, "got module interrupt\n");
   10404 
   10405 	WM_CORE_UNLOCK(sc);
   10406 
   10407 	if (icr & ICR_RXO) {
   10408 #if defined(WM_DEBUG)
   10409 		log(LOG_WARNING, "%s: Receive overrun\n",
   10410 		    device_xname(sc->sc_dev));
   10411 #endif /* defined(WM_DEBUG) */
   10412 	}
   10413 
   10414 	rnd_add_uint32(&sc->rnd_source, rndval);
   10415 
   10416 	if (more) {
   10417 		/* Try to get more packets going. */
   10418 		wm_legacy_intr_disable(sc);
   10419 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10420 		wm_sched_handle_queue(sc, wmq);
   10421 	}
   10422 
   10423 	return 1;
   10424 }
   10425 
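         /*
          * Mask this queue's Tx/Rx interrupts. The mask register differs
          * by chip: the 82574 uses IMC with per-queue ICR bits, the 82575
          * uses EIMC with EITR_*_QUEUE bits, and later chips use EIMC with
          * one bit per MSI-X vector.
          */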
   10426 static inline void
   10427 wm_txrxintr_disable(struct wm_queue *wmq)
   10428 {
   10429 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10430 
   10431 	if (__predict_false(!wm_is_using_msix(sc))) {
   10432 		wm_legacy_intr_disable(sc);
   10433 		return;
   10434 	}
   10435 
   10436 	if (sc->sc_type == WM_T_82574)
   10437 		CSR_WRITE(sc, WMREG_IMC,
   10438 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10439 	else if (sc->sc_type == WM_T_82575)
   10440 		CSR_WRITE(sc, WMREG_EIMC,
   10441 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10442 	else
   10443 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10444 }
   10445 
   10446 static inline void
   10447 wm_txrxintr_enable(struct wm_queue *wmq)
   10448 {
   10449 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10450 
   10451 	wm_itrs_calculate(sc, wmq);
   10452 
   10453 	if (__predict_false(!wm_is_using_msix(sc))) {
   10454 		wm_legacy_intr_enable(sc);
   10455 		return;
   10456 	}
   10457 
   10458 	/*
    10459 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    10460 	 * here. There is no need to care which of RXQ(0) and RXQ(1)
    10461 	 * enables ICR_OTHER first, because each RXQ/TXQ interrupt is
    10462 	 * disabled while wm_handle_queue(wmq) is running.
   10463 	 */
   10464 	if (sc->sc_type == WM_T_82574)
   10465 		CSR_WRITE(sc, WMREG_IMS,
   10466 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10467 	else if (sc->sc_type == WM_T_82575)
   10468 		CSR_WRITE(sc, WMREG_EIMS,
   10469 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10470 	else
   10471 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10472 }
   10473 
   10474 static int
   10475 wm_txrxintr_msix(void *arg)
   10476 {
   10477 	struct wm_queue *wmq = arg;
   10478 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10479 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10480 	struct wm_softc *sc = txq->txq_sc;
   10481 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10482 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10483 	bool txmore;
   10484 	bool rxmore;
   10485 
   10486 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10487 
   10488 	DPRINTF(sc, WM_DEBUG_TX,
   10489 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10490 
   10491 	wm_txrxintr_disable(wmq);
   10492 
   10493 	mutex_enter(txq->txq_lock);
   10494 
   10495 	if (txq->txq_stopping) {
   10496 		mutex_exit(txq->txq_lock);
   10497 		return 1;
   10498 	}
   10499 
   10500 	WM_Q_EVCNT_INCR(txq, txdw);
   10501 	if (txlimit > 0) {
   10502 		txmore = wm_txeof(txq, txlimit);
    10503 		/* wm_deferred_start_locked() is called from wm_handle_queue(). */
   10504 	} else
   10505 		txmore = true;
   10506 	mutex_exit(txq->txq_lock);
   10507 
   10508 	DPRINTF(sc, WM_DEBUG_RX,
   10509 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10510 	mutex_enter(rxq->rxq_lock);
   10511 
   10512 	if (rxq->rxq_stopping) {
   10513 		mutex_exit(rxq->rxq_lock);
   10514 		return 1;
   10515 	}
   10516 
   10517 	WM_Q_EVCNT_INCR(rxq, intr);
   10518 	if (rxlimit > 0) {
   10519 		rxmore = wm_rxeof(rxq, rxlimit);
   10520 	} else
   10521 		rxmore = true;
   10522 	mutex_exit(rxq->rxq_lock);
   10523 
   10524 	wm_itrs_writereg(sc, wmq);
   10525 
   10526 	if (txmore || rxmore) {
   10527 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10528 		wm_sched_handle_queue(sc, wmq);
   10529 	} else
   10530 		wm_txrxintr_enable(wmq);
   10531 
   10532 	return 1;
   10533 }
   10534 
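         /*
          * Deferred (softint or workqueue) Tx/Rx processing. Note that this
          * path uses sc_tx_process_limit and sc_rx_process_limit, which may
          * differ from the *_intr_process_limit values used in hard
          * interrupt context.
          */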
   10535 static void
   10536 wm_handle_queue(void *arg)
   10537 {
   10538 	struct wm_queue *wmq = arg;
   10539 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10540 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10541 	struct wm_softc *sc = txq->txq_sc;
   10542 	u_int txlimit = sc->sc_tx_process_limit;
   10543 	u_int rxlimit = sc->sc_rx_process_limit;
   10544 	bool txmore;
   10545 	bool rxmore;
   10546 
   10547 	mutex_enter(txq->txq_lock);
   10548 	if (txq->txq_stopping) {
   10549 		mutex_exit(txq->txq_lock);
   10550 		return;
   10551 	}
   10552 	txmore = wm_txeof(txq, txlimit);
   10553 	wm_deferred_start_locked(txq);
   10554 	mutex_exit(txq->txq_lock);
   10555 
   10556 	mutex_enter(rxq->rxq_lock);
   10557 	if (rxq->rxq_stopping) {
   10558 		mutex_exit(rxq->rxq_lock);
   10559 		return;
   10560 	}
   10561 	WM_Q_EVCNT_INCR(rxq, defer);
   10562 	rxmore = wm_rxeof(rxq, rxlimit);
   10563 	mutex_exit(rxq->rxq_lock);
   10564 
   10565 	if (txmore || rxmore) {
   10566 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10567 		wm_sched_handle_queue(sc, wmq);
   10568 	} else
   10569 		wm_txrxintr_enable(wmq);
   10570 }
   10571 
   10572 static void
   10573 wm_handle_queue_work(struct work *wk, void *context)
   10574 {
   10575 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10576 
   10577 	/*
   10578 	 * "enqueued flag" is not required here.
   10579 	 */
   10580 	wm_handle_queue(wmq);
   10581 }
   10582 
   10583 /*
   10584  * wm_linkintr_msix:
   10585  *
   10586  *	Interrupt service routine for link status change for MSI-X.
   10587  */
   10588 static int
   10589 wm_linkintr_msix(void *arg)
   10590 {
   10591 	struct wm_softc *sc = arg;
   10592 	uint32_t reg;
   10593 	bool has_rxo;
   10594 
   10595 	reg = CSR_READ(sc, WMREG_ICR);
   10596 	WM_CORE_LOCK(sc);
   10597 	DPRINTF(sc, WM_DEBUG_LINK,
   10598 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10599 		device_xname(sc->sc_dev), reg));
   10600 
   10601 	if (sc->sc_core_stopping)
   10602 		goto out;
   10603 
   10604 	if ((reg & ICR_LSC) != 0) {
   10605 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10606 		wm_linkintr(sc, ICR_LSC);
   10607 	}
   10608 	if ((reg & ICR_GPI(0)) != 0)
   10609 		device_printf(sc->sc_dev, "got module interrupt\n");
   10610 
   10611 	/*
   10612 	 * XXX 82574 MSI-X mode workaround
   10613 	 *
    10614 	 * In 82574 MSI-X mode, a receive overrun (RXO) interrupt is raised
    10615 	 * on the ICR_OTHER MSI-X vector; furthermore, neither the ICR_RXQ(0)
    10616 	 * nor the ICR_RXQ(1) vector is raised. So, we generate ICR_RXQ(0) and
    10617 	 * ICR_RXQ(1) interrupts by writing to WMREG_ICS to process Rx packets.
   10618 	 */
   10619 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10620 #if defined(WM_DEBUG)
   10621 		log(LOG_WARNING, "%s: Receive overrun\n",
   10622 		    device_xname(sc->sc_dev));
   10623 #endif /* defined(WM_DEBUG) */
   10624 
   10625 		has_rxo = true;
   10626 		/*
    10627 		 * The RXO interrupt fires at a very high rate when receive
    10628 		 * traffic is heavy. We use polling mode for ICR_OTHER, just
    10629 		 * as for Tx/Rx interrupts: ICR_OTHER is re-enabled at the end of
   10630 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   10631 		 * ICR_RXQ(1) interrupts.
   10632 		 */
   10633 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10634 
   10635 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10636 	}
   10637 
   10640 out:
   10641 	WM_CORE_UNLOCK(sc);
   10642 
   10643 	if (sc->sc_type == WM_T_82574) {
   10644 		if (!has_rxo)
   10645 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10646 		else
   10647 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10648 	} else if (sc->sc_type == WM_T_82575)
   10649 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10650 	else
   10651 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10652 
   10653 	return 1;
   10654 }
   10655 
   10656 /*
   10657  * Media related.
   10658  * GMII, SGMII, TBI (and SERDES)
   10659  */
   10660 
   10661 /* Common */
   10662 
   10663 /*
   10664  * wm_tbi_serdes_set_linkled:
   10665  *
   10666  *	Update the link LED on TBI and SERDES devices.
   10667  */
   10668 static void
   10669 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10670 {
   10671 
   10672 	if (sc->sc_tbi_linkup)
   10673 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10674 	else
   10675 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10676 
   10677 	/* 82540 or newer devices are active low */
   10678 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10679 
   10680 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10681 }
   10682 
   10683 /* GMII related */
   10684 
   10685 /*
   10686  * wm_gmii_reset:
   10687  *
   10688  *	Reset the PHY.
   10689  */
   10690 static void
   10691 wm_gmii_reset(struct wm_softc *sc)
   10692 {
   10693 	uint32_t reg;
   10694 	int rv;
   10695 
   10696 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10697 		device_xname(sc->sc_dev), __func__));
   10698 
   10699 	rv = sc->phy.acquire(sc);
   10700 	if (rv != 0) {
   10701 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10702 		    __func__);
   10703 		return;
   10704 	}
   10705 
   10706 	switch (sc->sc_type) {
   10707 	case WM_T_82542_2_0:
   10708 	case WM_T_82542_2_1:
   10709 		/* null */
   10710 		break;
   10711 	case WM_T_82543:
   10712 		/*
   10713 		 * With 82543, we need to force speed and duplex on the MAC
   10714 		 * equal to what the PHY speed and duplex configuration is.
   10715 		 * In addition, we need to perform a hardware reset on the PHY
   10716 		 * to take it out of reset.
   10717 		 */
   10718 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10719 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10720 
   10721 		/* The PHY reset pin is active-low. */
   10722 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10723 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10724 		    CTRL_EXT_SWDPIN(4));
   10725 		reg |= CTRL_EXT_SWDPIO(4);
   10726 
   10727 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10728 		CSR_WRITE_FLUSH(sc);
   10729 		delay(10*1000);
   10730 
   10731 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10732 		CSR_WRITE_FLUSH(sc);
   10733 		delay(150);
   10734 #if 0
   10735 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10736 #endif
   10737 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10738 		break;
   10739 	case WM_T_82544:	/* Reset 10000us */
   10740 	case WM_T_82540:
   10741 	case WM_T_82545:
   10742 	case WM_T_82545_3:
   10743 	case WM_T_82546:
   10744 	case WM_T_82546_3:
   10745 	case WM_T_82541:
   10746 	case WM_T_82541_2:
   10747 	case WM_T_82547:
   10748 	case WM_T_82547_2:
   10749 	case WM_T_82571:	/* Reset 100us */
   10750 	case WM_T_82572:
   10751 	case WM_T_82573:
   10752 	case WM_T_82574:
   10753 	case WM_T_82575:
   10754 	case WM_T_82576:
   10755 	case WM_T_82580:
   10756 	case WM_T_I350:
   10757 	case WM_T_I354:
   10758 	case WM_T_I210:
   10759 	case WM_T_I211:
   10760 	case WM_T_82583:
   10761 	case WM_T_80003:
   10762 		/* Generic reset */
   10763 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10764 		CSR_WRITE_FLUSH(sc);
   10765 		delay(20000);
   10766 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10767 		CSR_WRITE_FLUSH(sc);
   10768 		delay(20000);
   10769 
   10770 		if ((sc->sc_type == WM_T_82541)
   10771 		    || (sc->sc_type == WM_T_82541_2)
   10772 		    || (sc->sc_type == WM_T_82547)
   10773 		    || (sc->sc_type == WM_T_82547_2)) {
    10774 			/* Workarounds for igp are done in igp_reset() */
   10775 			/* XXX add code to set LED after phy reset */
   10776 		}
   10777 		break;
   10778 	case WM_T_ICH8:
   10779 	case WM_T_ICH9:
   10780 	case WM_T_ICH10:
   10781 	case WM_T_PCH:
   10782 	case WM_T_PCH2:
   10783 	case WM_T_PCH_LPT:
   10784 	case WM_T_PCH_SPT:
   10785 	case WM_T_PCH_CNP:
   10786 		/* Generic reset */
   10787 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10788 		CSR_WRITE_FLUSH(sc);
   10789 		delay(100);
   10790 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10791 		CSR_WRITE_FLUSH(sc);
   10792 		delay(150);
   10793 		break;
   10794 	default:
   10795 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10796 		    __func__);
   10797 		break;
   10798 	}
   10799 
   10800 	sc->phy.release(sc);
   10801 
   10802 	/* get_cfg_done */
   10803 	wm_get_cfg_done(sc);
   10804 
   10805 	/* Extra setup */
   10806 	switch (sc->sc_type) {
   10807 	case WM_T_82542_2_0:
   10808 	case WM_T_82542_2_1:
   10809 	case WM_T_82543:
   10810 	case WM_T_82544:
   10811 	case WM_T_82540:
   10812 	case WM_T_82545:
   10813 	case WM_T_82545_3:
   10814 	case WM_T_82546:
   10815 	case WM_T_82546_3:
   10816 	case WM_T_82541_2:
   10817 	case WM_T_82547_2:
   10818 	case WM_T_82571:
   10819 	case WM_T_82572:
   10820 	case WM_T_82573:
   10821 	case WM_T_82574:
   10822 	case WM_T_82583:
   10823 	case WM_T_82575:
   10824 	case WM_T_82576:
   10825 	case WM_T_82580:
   10826 	case WM_T_I350:
   10827 	case WM_T_I354:
   10828 	case WM_T_I210:
   10829 	case WM_T_I211:
   10830 	case WM_T_80003:
   10831 		/* Null */
   10832 		break;
   10833 	case WM_T_82541:
   10834 	case WM_T_82547:
    10835 		/* XXX Actively configure the LED after PHY reset */
   10836 		break;
   10837 	case WM_T_ICH8:
   10838 	case WM_T_ICH9:
   10839 	case WM_T_ICH10:
   10840 	case WM_T_PCH:
   10841 	case WM_T_PCH2:
   10842 	case WM_T_PCH_LPT:
   10843 	case WM_T_PCH_SPT:
   10844 	case WM_T_PCH_CNP:
   10845 		wm_phy_post_reset(sc);
   10846 		break;
   10847 	default:
   10848 		panic("%s: unknown type\n", __func__);
   10849 		break;
   10850 	}
   10851 }
   10852 
   10853 /*
   10854  * Set up sc_phytype and mii_{read|write}reg.
   10855  *
    10856  *  To identify the PHY type, the correct read/write functions must
    10857  * be selected. To select the correct read/write functions, the PCI ID
    10858  * or MAC type is required, without accessing any PHY registers.
    10859  *
    10860  *  On the first call of this function, the PHY ID is not yet known,
    10861  * so check the PCI ID or MAC type. The list of PCI IDs may not be
    10862  * perfect, so the result might be incorrect.
    10863  *
    10864  *  On the second call, the PHY OUI and model are used to identify the
    10865  * PHY type. It might still not be perfect due to missing entries, but
    10866  * it is better than the first call.
    10867  *
    10868  *  If the newly detected result differs from the previous assumption,
    10869  * a diagnostic message is printed.
   10870  */
   10871 static void
   10872 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10873     uint16_t phy_model)
   10874 {
   10875 	device_t dev = sc->sc_dev;
   10876 	struct mii_data *mii = &sc->sc_mii;
   10877 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10878 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10879 	mii_readreg_t new_readreg;
   10880 	mii_writereg_t new_writereg;
   10881 	bool dodiag = true;
   10882 
   10883 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10884 		device_xname(sc->sc_dev), __func__));
   10885 
   10886 	/*
    10887 	 * A 1000BASE-T SFP uses SGMII, and the first assumed PHY type is
    10888 	 * always incorrect; don't print diag output on the second call.
   10889 	 */
   10890 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10891 		dodiag = false;
   10892 
   10893 	if (mii->mii_readreg == NULL) {
   10894 		/*
   10895 		 *  This is the first call of this function. For ICH and PCH
   10896 		 * variants, it's difficult to determine the PHY access method
   10897 		 * by sc_type, so use the PCI product ID for some devices.
   10898 		 */
   10899 
   10900 		switch (sc->sc_pcidevid) {
   10901 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10902 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10903 			/* 82577 */
   10904 			new_phytype = WMPHY_82577;
   10905 			break;
   10906 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10907 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10908 			/* 82578 */
   10909 			new_phytype = WMPHY_82578;
   10910 			break;
   10911 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10912 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10913 			/* 82579 */
   10914 			new_phytype = WMPHY_82579;
   10915 			break;
   10916 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10917 		case PCI_PRODUCT_INTEL_82801I_BM:
   10918 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10919 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10920 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10921 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10922 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10923 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10924 			/* ICH8, 9, 10 with 82567 */
   10925 			new_phytype = WMPHY_BM;
   10926 			break;
   10927 		default:
   10928 			break;
   10929 		}
   10930 	} else {
   10931 		/* It's not the first call. Use PHY OUI and model */
   10932 		switch (phy_oui) {
   10933 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10934 			switch (phy_model) {
   10935 			case MII_MODEL_ATTANSIC_AR8021:
   10936 				new_phytype = WMPHY_82578;
   10937 				break;
   10938 			default:
   10939 				break;
   10940 			}
   10941 			break;
   10942 		case MII_OUI_xxMARVELL:
   10943 			switch (phy_model) {
   10944 			case MII_MODEL_xxMARVELL_I210:
   10945 				new_phytype = WMPHY_I210;
   10946 				break;
   10947 			case MII_MODEL_xxMARVELL_E1011:
   10948 			case MII_MODEL_xxMARVELL_E1000_3:
   10949 			case MII_MODEL_xxMARVELL_E1000_5:
   10950 			case MII_MODEL_xxMARVELL_E1112:
   10951 				new_phytype = WMPHY_M88;
   10952 				break;
   10953 			case MII_MODEL_xxMARVELL_E1149:
   10954 				new_phytype = WMPHY_BM;
   10955 				break;
   10956 			case MII_MODEL_xxMARVELL_E1111:
   10957 			case MII_MODEL_xxMARVELL_I347:
   10958 			case MII_MODEL_xxMARVELL_E1512:
   10959 			case MII_MODEL_xxMARVELL_E1340M:
   10960 			case MII_MODEL_xxMARVELL_E1543:
   10961 				new_phytype = WMPHY_M88;
   10962 				break;
   10963 			case MII_MODEL_xxMARVELL_I82563:
   10964 				new_phytype = WMPHY_GG82563;
   10965 				break;
   10966 			default:
   10967 				break;
   10968 			}
   10969 			break;
   10970 		case MII_OUI_INTEL:
   10971 			switch (phy_model) {
   10972 			case MII_MODEL_INTEL_I82577:
   10973 				new_phytype = WMPHY_82577;
   10974 				break;
   10975 			case MII_MODEL_INTEL_I82579:
   10976 				new_phytype = WMPHY_82579;
   10977 				break;
   10978 			case MII_MODEL_INTEL_I217:
   10979 				new_phytype = WMPHY_I217;
   10980 				break;
   10981 			case MII_MODEL_INTEL_I82580:
   10982 				new_phytype = WMPHY_82580;
   10983 				break;
   10984 			case MII_MODEL_INTEL_I350:
   10985 				new_phytype = WMPHY_I350;
   10986 				break;
   10987 			default:
   10988 				break;
   10989 			}
   10990 			break;
   10991 		case MII_OUI_yyINTEL:
   10992 			switch (phy_model) {
   10993 			case MII_MODEL_yyINTEL_I82562G:
   10994 			case MII_MODEL_yyINTEL_I82562EM:
   10995 			case MII_MODEL_yyINTEL_I82562ET:
   10996 				new_phytype = WMPHY_IFE;
   10997 				break;
   10998 			case MII_MODEL_yyINTEL_IGP01E1000:
   10999 				new_phytype = WMPHY_IGP;
   11000 				break;
   11001 			case MII_MODEL_yyINTEL_I82566:
   11002 				new_phytype = WMPHY_IGP_3;
   11003 				break;
   11004 			default:
   11005 				break;
   11006 			}
   11007 			break;
   11008 		default:
   11009 			break;
   11010 		}
   11011 
   11012 		if (dodiag) {
   11013 			if (new_phytype == WMPHY_UNKNOWN)
   11014 				aprint_verbose_dev(dev,
   11015 				    "%s: Unknown PHY model. OUI=%06x, "
   11016 				    "model=%04x\n", __func__, phy_oui,
   11017 				    phy_model);
   11018 
   11019 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11020 			    && (sc->sc_phytype != new_phytype)) {
   11021 				aprint_error_dev(dev, "Previously assumed PHY "
   11022 				    "type(%u) was incorrect. PHY type from PHY"
   11023 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   11024 			}
   11025 		}
   11026 	}
   11027 
   11028 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   11029 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   11030 		/* SGMII */
   11031 		new_readreg = wm_sgmii_readreg;
   11032 		new_writereg = wm_sgmii_writereg;
   11033 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11034 		/* BM2 (phyaddr == 1) */
   11035 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11036 		    && (new_phytype != WMPHY_BM)
   11037 		    && (new_phytype != WMPHY_UNKNOWN))
   11038 			doubt_phytype = new_phytype;
   11039 		new_phytype = WMPHY_BM;
   11040 		new_readreg = wm_gmii_bm_readreg;
   11041 		new_writereg = wm_gmii_bm_writereg;
   11042 	} else if (sc->sc_type >= WM_T_PCH) {
   11043 		/* All PCH* use _hv_ */
   11044 		new_readreg = wm_gmii_hv_readreg;
   11045 		new_writereg = wm_gmii_hv_writereg;
   11046 	} else if (sc->sc_type >= WM_T_ICH8) {
   11047 		/* non-82567 ICH8, 9 and 10 */
   11048 		new_readreg = wm_gmii_i82544_readreg;
   11049 		new_writereg = wm_gmii_i82544_writereg;
   11050 	} else if (sc->sc_type >= WM_T_80003) {
   11051 		/* 80003 */
   11052 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11053 		    && (new_phytype != WMPHY_GG82563)
   11054 		    && (new_phytype != WMPHY_UNKNOWN))
   11055 			doubt_phytype = new_phytype;
   11056 		new_phytype = WMPHY_GG82563;
   11057 		new_readreg = wm_gmii_i80003_readreg;
   11058 		new_writereg = wm_gmii_i80003_writereg;
   11059 	} else if (sc->sc_type >= WM_T_I210) {
   11060 		/* I210 and I211 */
   11061 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11062 		    && (new_phytype != WMPHY_I210)
   11063 		    && (new_phytype != WMPHY_UNKNOWN))
   11064 			doubt_phytype = new_phytype;
   11065 		new_phytype = WMPHY_I210;
   11066 		new_readreg = wm_gmii_gs40g_readreg;
   11067 		new_writereg = wm_gmii_gs40g_writereg;
   11068 	} else if (sc->sc_type >= WM_T_82580) {
   11069 		/* 82580, I350 and I354 */
   11070 		new_readreg = wm_gmii_82580_readreg;
   11071 		new_writereg = wm_gmii_82580_writereg;
   11072 	} else if (sc->sc_type >= WM_T_82544) {
    11073 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   11074 		new_readreg = wm_gmii_i82544_readreg;
   11075 		new_writereg = wm_gmii_i82544_writereg;
   11076 	} else {
   11077 		new_readreg = wm_gmii_i82543_readreg;
   11078 		new_writereg = wm_gmii_i82543_writereg;
   11079 	}
   11080 
   11081 	if (new_phytype == WMPHY_BM) {
   11082 		/* All BM use _bm_ */
   11083 		new_readreg = wm_gmii_bm_readreg;
   11084 		new_writereg = wm_gmii_bm_writereg;
   11085 	}
   11086 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   11087 		/* All PCH* use _hv_ */
   11088 		new_readreg = wm_gmii_hv_readreg;
   11089 		new_writereg = wm_gmii_hv_writereg;
   11090 	}
   11091 
   11092 	/* Diag output */
   11093 	if (dodiag) {
   11094 		if (doubt_phytype != WMPHY_UNKNOWN)
   11095 			aprint_error_dev(dev, "Assumed new PHY type was "
   11096 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   11097 			    new_phytype);
   11098 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   11099 		    && (sc->sc_phytype != new_phytype))
   11100 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   11101 			    "was incorrect. New PHY type = %u\n",
   11102 			    sc->sc_phytype, new_phytype);
   11103 
   11104 		if ((mii->mii_readreg != NULL) &&
   11105 		    (new_phytype == WMPHY_UNKNOWN))
   11106 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   11107 
   11108 		if ((mii->mii_readreg != NULL) &&
   11109 		    (mii->mii_readreg != new_readreg))
   11110 			aprint_error_dev(dev, "Previously assumed PHY "
   11111 			    "read/write function was incorrect.\n");
   11112 	}
   11113 
   11114 	/* Update now */
   11115 	sc->sc_phytype = new_phytype;
   11116 	mii->mii_readreg = new_readreg;
   11117 	mii->mii_writereg = new_writereg;
   11118 	if (new_readreg == wm_gmii_hv_readreg) {
   11119 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   11120 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   11121 	} else if (new_readreg == wm_sgmii_readreg) {
   11122 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   11123 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   11124 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   11125 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   11126 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   11127 	}
   11128 }
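
/*
 * Call-sequence sketch (illustrative only, not additional driver code):
 * the first call is made before any PHY register can be read, so both
 * IDs are zero; once a PHY has attached, the function is called again
 * with the real OUI and model to refine the guess:
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);		(first call)
 *	...probe PHYs with mii_attach()...
 *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
 *	    child->mii_mpd_model);			(second call)
 *
 * See the end of wm_gmii_mediainit() below for the second call.
 */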
   11129 
   11130 /*
   11131  * wm_get_phy_id_82575:
   11132  *
    11133  * Return the PHY address. Return -1 on failure.
   11134  */
   11135 static int
   11136 wm_get_phy_id_82575(struct wm_softc *sc)
   11137 {
   11138 	uint32_t reg;
   11139 	int phyid = -1;
   11140 
   11141 	/* XXX */
   11142 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   11143 		return -1;
   11144 
   11145 	if (wm_sgmii_uses_mdio(sc)) {
   11146 		switch (sc->sc_type) {
   11147 		case WM_T_82575:
   11148 		case WM_T_82576:
   11149 			reg = CSR_READ(sc, WMREG_MDIC);
   11150 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   11151 			break;
   11152 		case WM_T_82580:
   11153 		case WM_T_I350:
   11154 		case WM_T_I354:
   11155 		case WM_T_I210:
   11156 		case WM_T_I211:
   11157 			reg = CSR_READ(sc, WMREG_MDICNFG);
   11158 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   11159 			break;
   11160 		default:
   11161 			return -1;
   11162 		}
   11163 	}
   11164 
   11165 	return phyid;
   11166 }
   11167 
   11168 /*
   11169  * wm_gmii_mediainit:
   11170  *
   11171  *	Initialize media for use on 1000BASE-T devices.
   11172  */
   11173 static void
   11174 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   11175 {
   11176 	device_t dev = sc->sc_dev;
   11177 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11178 	struct mii_data *mii = &sc->sc_mii;
   11179 
   11180 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11181 		device_xname(sc->sc_dev), __func__));
   11182 
   11183 	/* We have GMII. */
   11184 	sc->sc_flags |= WM_F_HAS_MII;
   11185 
   11186 	if (sc->sc_type == WM_T_80003)
   11187 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11188 	else
   11189 		sc->sc_tipg = TIPG_1000T_DFLT;
   11190 
   11191 	/*
   11192 	 * Let the chip set speed/duplex on its own based on
   11193 	 * signals from the PHY.
   11194 	 * XXXbouyer - I'm not sure this is right for the 80003,
   11195 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   11196 	 */
   11197 	sc->sc_ctrl |= CTRL_SLU;
   11198 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11199 
   11200 	/* Initialize our media structures and probe the GMII. */
   11201 	mii->mii_ifp = ifp;
   11202 
   11203 	mii->mii_statchg = wm_gmii_statchg;
   11204 
    11205 	/* Switch PHY control from SMBus to PCIe */
   11206 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   11207 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   11208 	    || (sc->sc_type == WM_T_PCH_CNP))
   11209 		wm_init_phy_workarounds_pchlan(sc);
   11210 
   11211 	wm_gmii_reset(sc);
   11212 
   11213 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11214 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   11215 	    wm_gmii_mediastatus, sc->sc_core_lock);
   11216 
   11217 	/* Setup internal SGMII PHY for SFP */
   11218 	wm_sgmii_sfp_preconfig(sc);
   11219 
   11220 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   11221 	    || (sc->sc_type == WM_T_82580)
   11222 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   11223 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   11224 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   11225 			/* Attach only one port */
   11226 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   11227 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11228 		} else {
   11229 			int i, id;
   11230 			uint32_t ctrl_ext;
   11231 
   11232 			id = wm_get_phy_id_82575(sc);
   11233 			if (id != -1) {
   11234 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   11235 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   11236 			}
   11237 			if ((id == -1)
   11238 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11239 				/* Power on sgmii phy if it is disabled */
   11240 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11241 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   11242 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   11243 				CSR_WRITE_FLUSH(sc);
   11244 				delay(300*1000); /* XXX too long */
   11245 
   11246 				/*
    11247 				 * Scan PHY addresses 1 to 7.
    11248 				 *
    11249 				 * I2C access fails with the I2C register's
    11250 				 * ERROR bit set, so suppress error messages
    11251 				 * while scanning.
   11252 				 */
   11253 				sc->phy.no_errprint = true;
   11254 				for (i = 1; i < 8; i++)
   11255 					mii_attach(sc->sc_dev, &sc->sc_mii,
   11256 					    0xffffffff, i, MII_OFFSET_ANY,
   11257 					    MIIF_DOPAUSE);
   11258 				sc->phy.no_errprint = false;
   11259 
   11260 				/* Restore previous sfp cage power state */
   11261 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11262 			}
   11263 		}
   11264 	} else
   11265 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11266 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11267 
   11268 	/*
   11269 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   11270 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   11271 	 */
   11272 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   11273 		|| (sc->sc_type == WM_T_PCH_SPT)
   11274 		|| (sc->sc_type == WM_T_PCH_CNP))
   11275 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   11276 		wm_set_mdio_slow_mode_hv(sc);
   11277 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11278 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11279 	}
   11280 
   11281 	/*
   11282 	 * (For ICH8 variants)
   11283 	 * If PHY detection failed, use BM's r/w function and retry.
   11284 	 */
   11285 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   11286 		/* if failed, retry with *_bm_* */
   11287 		aprint_verbose_dev(dev, "Assumed PHY access function "
   11288 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   11289 		    sc->sc_phytype);
   11290 		sc->sc_phytype = WMPHY_BM;
   11291 		mii->mii_readreg = wm_gmii_bm_readreg;
   11292 		mii->mii_writereg = wm_gmii_bm_writereg;
   11293 
   11294 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   11295 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   11296 	}
   11297 
   11298 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    11299 		/* No PHY was found */
   11300 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   11301 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   11302 		sc->sc_phytype = WMPHY_NONE;
   11303 	} else {
   11304 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   11305 
   11306 		/*
    11307 		 * PHY found! Check the PHY type again with the second
    11308 		 * call of wm_gmii_setup_phytype().
   11309 		 */
   11310 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   11311 		    child->mii_mpd_model);
   11312 
   11313 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   11314 	}
   11315 }
   11316 
   11317 /*
   11318  * wm_gmii_mediachange:	[ifmedia interface function]
   11319  *
   11320  *	Set hardware to newly-selected media on a 1000BASE-T device.
   11321  */
   11322 static int
   11323 wm_gmii_mediachange(struct ifnet *ifp)
   11324 {
   11325 	struct wm_softc *sc = ifp->if_softc;
   11326 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11327 	uint32_t reg;
   11328 	int rc;
   11329 
   11330 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11331 		device_xname(sc->sc_dev), __func__));
   11332 
   11333 	KASSERT(WM_CORE_LOCKED(sc));
   11334 
   11335 	if ((sc->sc_if_flags & IFF_UP) == 0)
   11336 		return 0;
   11337 
   11338 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   11339 	if ((sc->sc_type == WM_T_82580)
   11340 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   11341 	    || (sc->sc_type == WM_T_I211)) {
   11342 		reg = CSR_READ(sc, WMREG_PHPM);
   11343 		reg &= ~PHPM_GO_LINK_D;
   11344 		CSR_WRITE(sc, WMREG_PHPM, reg);
   11345 	}
   11346 
   11347 	/* Disable D0 LPLU. */
   11348 	wm_lplu_d0_disable(sc);
   11349 
   11350 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   11351 	sc->sc_ctrl |= CTRL_SLU;
   11352 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11353 	    || (sc->sc_type > WM_T_82543)) {
   11354 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   11355 	} else {
   11356 		sc->sc_ctrl &= ~CTRL_ASDE;
   11357 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   11358 		if (ife->ifm_media & IFM_FDX)
   11359 			sc->sc_ctrl |= CTRL_FD;
   11360 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   11361 		case IFM_10_T:
   11362 			sc->sc_ctrl |= CTRL_SPEED_10;
   11363 			break;
   11364 		case IFM_100_TX:
   11365 			sc->sc_ctrl |= CTRL_SPEED_100;
   11366 			break;
   11367 		case IFM_1000_T:
   11368 			sc->sc_ctrl |= CTRL_SPEED_1000;
   11369 			break;
   11370 		case IFM_NONE:
   11371 			/* There is no specific setting for IFM_NONE */
   11372 			break;
   11373 		default:
   11374 			panic("wm_gmii_mediachange: bad media 0x%x",
   11375 			    ife->ifm_media);
   11376 		}
   11377 	}
   11378 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11379 	CSR_WRITE_FLUSH(sc);
   11380 
   11381 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11382 		wm_serdes_mediachange(ifp);
   11383 
   11384 	if (sc->sc_type <= WM_T_82543)
   11385 		wm_gmii_reset(sc);
   11386 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   11387 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    11388 		/* Allow time for the SFP cage to power up the PHY */
   11389 		delay(300 * 1000);
   11390 		wm_gmii_reset(sc);
   11391 	}
   11392 
   11393 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11394 		return 0;
   11395 	return rc;
   11396 }
   11397 
   11398 /*
   11399  * wm_gmii_mediastatus:	[ifmedia interface function]
   11400  *
   11401  *	Get the current interface media status on a 1000BASE-T device.
   11402  */
   11403 static void
   11404 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11405 {
   11406 	struct wm_softc *sc = ifp->if_softc;
   11407 
   11408 	KASSERT(WM_CORE_LOCKED(sc));
   11409 
   11410 	ether_mediastatus(ifp, ifmr);
   11411 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11412 	    | sc->sc_flowflags;
   11413 }
   11414 
   11415 #define	MDI_IO		CTRL_SWDPIN(2)
   11416 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11417 #define	MDI_CLK		CTRL_SWDPIN(3)
   11418 
   11419 static void
   11420 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11421 {
   11422 	uint32_t i, v;
   11423 
   11424 	v = CSR_READ(sc, WMREG_CTRL);
   11425 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11426 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11427 
   11428 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11429 		if (data & i)
   11430 			v |= MDI_IO;
   11431 		else
   11432 			v &= ~MDI_IO;
   11433 		CSR_WRITE(sc, WMREG_CTRL, v);
   11434 		CSR_WRITE_FLUSH(sc);
   11435 		delay(10);
   11436 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11437 		CSR_WRITE_FLUSH(sc);
   11438 		delay(10);
   11439 		CSR_WRITE(sc, WMREG_CTRL, v);
   11440 		CSR_WRITE_FLUSH(sc);
   11441 		delay(10);
   11442 	}
   11443 }
   11444 
   11445 static uint16_t
   11446 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11447 {
   11448 	uint32_t v, i;
   11449 	uint16_t data = 0;
   11450 
   11451 	v = CSR_READ(sc, WMREG_CTRL);
   11452 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11453 	v |= CTRL_SWDPIO(3);
   11454 
   11455 	CSR_WRITE(sc, WMREG_CTRL, v);
   11456 	CSR_WRITE_FLUSH(sc);
   11457 	delay(10);
   11458 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11459 	CSR_WRITE_FLUSH(sc);
   11460 	delay(10);
   11461 	CSR_WRITE(sc, WMREG_CTRL, v);
   11462 	CSR_WRITE_FLUSH(sc);
   11463 	delay(10);
   11464 
   11465 	for (i = 0; i < 16; i++) {
   11466 		data <<= 1;
   11467 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11468 		CSR_WRITE_FLUSH(sc);
   11469 		delay(10);
   11470 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11471 			data |= 1;
   11472 		CSR_WRITE(sc, WMREG_CTRL, v);
   11473 		CSR_WRITE_FLUSH(sc);
   11474 		delay(10);
   11475 	}
   11476 
   11477 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11478 	CSR_WRITE_FLUSH(sc);
   11479 	delay(10);
   11480 	CSR_WRITE(sc, WMREG_CTRL, v);
   11481 	CSR_WRITE_FLUSH(sc);
   11482 	delay(10);
   11483 
   11484 	return data;
   11485 }
   11486 
   11487 #undef MDI_IO
   11488 #undef MDI_DIR
   11489 #undef MDI_CLK
   11490 
   11491 /*
   11492  * wm_gmii_i82543_readreg:	[mii interface function]
   11493  *
   11494  *	Read a PHY register on the GMII (i82543 version).
   11495  */
   11496 static int
   11497 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11498 {
   11499 	struct wm_softc *sc = device_private(dev);
   11500 
   11501 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11502 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11503 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11504 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11505 
   11506 	DPRINTF(sc, WM_DEBUG_GMII,
   11507 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11508 		device_xname(dev), phy, reg, *val));
   11509 
   11510 	return 0;
   11511 }
   11512 
   11513 /*
   11514  * wm_gmii_i82543_writereg:	[mii interface function]
   11515  *
   11516  *	Write a PHY register on the GMII (i82543 version).
   11517  */
   11518 static int
   11519 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11520 {
   11521 	struct wm_softc *sc = device_private(dev);
   11522 
   11523 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11524 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11525 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11526 	    (MII_COMMAND_START << 30), 32);
   11527 
   11528 	return 0;
   11529 }
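
/*
 * Frame-layout sketch for the bit-banged accessors above. A read sends
 * a 32-bit preamble of ones followed by a 14-bit frame: start (01) in
 * bits 13:12, the read opcode (10) in bits 11:10, the PHY address in
 * bits 9:5 and the register number in bits 4:0. For example, reading
 * register 2 (MII_PHYIDR1) of the PHY at address 1 shifts out:
 *
 *	(MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
 *	    (1 << 5) | 2
 *	= (0x1 << 12) | (0x2 << 10) | 0x20 | 0x2 = 0x1822
 *
 * A write appends the turnaround (MII_COMMAND_ACK) and the 16-bit data
 * word, forming the single 32-bit frame sent by the writereg function.
 */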
   11530 
   11531 /*
   11532  * wm_gmii_mdic_readreg:	[mii interface function]
   11533  *
   11534  *	Read a PHY register on the GMII.
   11535  */
   11536 static int
   11537 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11538 {
   11539 	struct wm_softc *sc = device_private(dev);
   11540 	uint32_t mdic = 0;
   11541 	int i;
   11542 
   11543 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11544 	    && (reg > MII_ADDRMASK)) {
   11545 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11546 		    __func__, sc->sc_phytype, reg);
   11547 		reg &= MII_ADDRMASK;
   11548 	}
   11549 
   11550 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11551 	    MDIC_REGADD(reg));
   11552 
   11553 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11554 		delay(50);
   11555 		mdic = CSR_READ(sc, WMREG_MDIC);
   11556 		if (mdic & MDIC_READY)
   11557 			break;
   11558 	}
   11559 
   11560 	if ((mdic & MDIC_READY) == 0) {
   11561 		DPRINTF(sc, WM_DEBUG_GMII,
   11562 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11563 			device_xname(dev), phy, reg));
   11564 		return ETIMEDOUT;
   11565 	} else if (mdic & MDIC_E) {
   11566 		/* This is normal if no PHY is present. */
   11567 		DPRINTF(sc, WM_DEBUG_GMII,
   11568 		    ("%s: MDIC read error: phy %d reg %d\n",
   11569 			device_xname(sc->sc_dev), phy, reg));
   11570 		return -1;
   11571 	} else
   11572 		*val = MDIC_DATA(mdic);
   11573 
   11574 	/*
   11575 	 * Allow some time after each MDIC transaction to avoid
   11576 	 * reading duplicate data in the next MDIC transaction.
   11577 	 */
   11578 	if (sc->sc_type == WM_T_PCH2)
   11579 		delay(100);
   11580 
   11581 	return 0;
   11582 }
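
/*
 * Usage sketch (illustrative only): reading the first PHY ID register
 * of the PHY at address 1 through the MDIC-based accessor:
 *
 *	uint16_t id1;
 *
 *	if (wm_gmii_mdic_readreg(sc->sc_dev, 1, MII_PHYIDR1, &id1) == 0)
 *		device_printf(sc->sc_dev, "PHYIDR1 = 0x%04hx\n", id1);
 *
 * A nonzero return is either ETIMEDOUT (MDIC_READY never asserted) or
 * -1 (MDIC_E set, which is normal when no PHY answers at that address).
 */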
   11583 
   11584 /*
   11585  * wm_gmii_mdic_writereg:	[mii interface function]
   11586  *
   11587  *	Write a PHY register on the GMII.
   11588  */
   11589 static int
   11590 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11591 {
   11592 	struct wm_softc *sc = device_private(dev);
   11593 	uint32_t mdic = 0;
   11594 	int i;
   11595 
   11596 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11597 	    && (reg > MII_ADDRMASK)) {
   11598 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11599 		    __func__, sc->sc_phytype, reg);
   11600 		reg &= MII_ADDRMASK;
   11601 	}
   11602 
   11603 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11604 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11605 
   11606 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11607 		delay(50);
   11608 		mdic = CSR_READ(sc, WMREG_MDIC);
   11609 		if (mdic & MDIC_READY)
   11610 			break;
   11611 	}
   11612 
   11613 	if ((mdic & MDIC_READY) == 0) {
   11614 		DPRINTF(sc, WM_DEBUG_GMII,
   11615 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11616 			device_xname(dev), phy, reg));
   11617 		return ETIMEDOUT;
   11618 	} else if (mdic & MDIC_E) {
   11619 		DPRINTF(sc, WM_DEBUG_GMII,
   11620 		    ("%s: MDIC write error: phy %d reg %d\n",
   11621 			device_xname(dev), phy, reg));
   11622 		return -1;
   11623 	}
   11624 
   11625 	/*
   11626 	 * Allow some time after each MDIC transaction to avoid
   11627 	 * reading duplicate data in the next MDIC transaction.
   11628 	 */
   11629 	if (sc->sc_type == WM_T_PCH2)
   11630 		delay(100);
   11631 
   11632 	return 0;
   11633 }
   11634 
   11635 /*
   11636  * wm_gmii_i82544_readreg:	[mii interface function]
   11637  *
   11638  *	Read a PHY register on the GMII.
   11639  */
   11640 static int
   11641 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11642 {
   11643 	struct wm_softc *sc = device_private(dev);
   11644 	int rv;
   11645 
   11646 	rv = sc->phy.acquire(sc);
   11647 	if (rv != 0) {
   11648 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11649 		return rv;
   11650 	}
   11651 
   11652 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11653 
   11654 	sc->phy.release(sc);
   11655 
   11656 	return rv;
   11657 }
   11658 
   11659 static int
   11660 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11661 {
   11662 	struct wm_softc *sc = device_private(dev);
   11663 	int rv;
   11664 
   11665 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11666 		switch (sc->sc_phytype) {
   11667 		case WMPHY_IGP:
   11668 		case WMPHY_IGP_2:
   11669 		case WMPHY_IGP_3:
   11670 			rv = wm_gmii_mdic_writereg(dev, phy,
   11671 			    IGPHY_PAGE_SELECT, reg);
   11672 			if (rv != 0)
   11673 				return rv;
   11674 			break;
   11675 		default:
   11676 #ifdef WM_DEBUG
   11677 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11678 			    __func__, sc->sc_phytype, reg);
   11679 #endif
   11680 			break;
   11681 		}
   11682 	}
   11683 
   11684 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11685 }
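
/*
 * Paging sketch for the IGP path above (illustrative arithmetic,
 * assuming 0x39 exceeds BME1000_MAX_MULTI_PAGE_REG): the page lives in
 * the bits above MII_ADDRMASK (0x1f), so for a combined address of
 * reg == 0x39 the full value is first written to IGPHY_PAGE_SELECT and
 * the MDIC transaction then uses only the in-page offset:
 *
 *	reg & MII_ADDRMASK == 0x39 & 0x1f == 0x19	(register 25)
 */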
   11686 
   11687 /*
   11688  * wm_gmii_i82544_writereg:	[mii interface function]
   11689  *
   11690  *	Write a PHY register on the GMII.
   11691  */
   11692 static int
   11693 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11694 {
   11695 	struct wm_softc *sc = device_private(dev);
   11696 	int rv;
   11697 
   11698 	rv = sc->phy.acquire(sc);
   11699 	if (rv != 0) {
   11700 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11701 		return rv;
   11702 	}
   11703 
   11704 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11705 	sc->phy.release(sc);
   11706 
   11707 	return rv;
   11708 }
   11709 
   11710 static int
   11711 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11712 {
   11713 	struct wm_softc *sc = device_private(dev);
   11714 	int rv;
   11715 
   11716 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11717 		switch (sc->sc_phytype) {
   11718 		case WMPHY_IGP:
   11719 		case WMPHY_IGP_2:
   11720 		case WMPHY_IGP_3:
   11721 			rv = wm_gmii_mdic_writereg(dev, phy,
   11722 			    IGPHY_PAGE_SELECT, reg);
   11723 			if (rv != 0)
   11724 				return rv;
   11725 			break;
   11726 		default:
   11727 #ifdef WM_DEBUG
   11728 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   11729 			    __func__, sc->sc_phytype, reg);
   11730 #endif
   11731 			break;
   11732 		}
   11733 	}
   11734 
   11735 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11736 }
   11737 
   11738 /*
   11739  * wm_gmii_i80003_readreg:	[mii interface function]
   11740  *
   11741  *	Read a PHY register on the kumeran
   11742  * This could be handled by the PHY layer if we didn't have to lock the
   11743  * resource ...
   11744  */
   11745 static int
   11746 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11747 {
   11748 	struct wm_softc *sc = device_private(dev);
   11749 	int page_select;
   11750 	uint16_t temp, temp2;
   11751 	int rv;
   11752 
   11753 	if (phy != 1) /* Only one PHY on kumeran bus */
   11754 		return -1;
   11755 
   11756 	rv = sc->phy.acquire(sc);
   11757 	if (rv != 0) {
   11758 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11759 		return rv;
   11760 	}
   11761 
   11762 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11763 		page_select = GG82563_PHY_PAGE_SELECT;
   11764 	else {
   11765 		/*
   11766 		 * Use Alternative Page Select register to access registers
   11767 		 * 30 and 31.
   11768 		 */
   11769 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11770 	}
   11771 	temp = reg >> GG82563_PAGE_SHIFT;
   11772 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11773 		goto out;
   11774 
   11775 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11776 		/*
    11777 		 * Wait an extra 200us to work around a bug in the ready
    11778 		 * bit of the MDIC register.
   11779 		 */
   11780 		delay(200);
   11781 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11782 		if ((rv != 0) || (temp2 != temp)) {
   11783 			device_printf(dev, "%s failed\n", __func__);
   11784 			rv = -1;
   11785 			goto out;
   11786 		}
   11787 		delay(200);
   11788 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11789 		delay(200);
   11790 	} else
   11791 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11792 
   11793 out:
   11794 	sc->phy.release(sc);
   11795 	return rv;
   11796 }
   11797 
   11798 /*
   11799  * wm_gmii_i80003_writereg:	[mii interface function]
   11800  *
   11801  *	Write a PHY register on the kumeran.
   11802  * This could be handled by the PHY layer if we didn't have to lock the
   11803  * resource ...
   11804  */
   11805 static int
   11806 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11807 {
   11808 	struct wm_softc *sc = device_private(dev);
   11809 	int page_select, rv;
   11810 	uint16_t temp, temp2;
   11811 
   11812 	if (phy != 1) /* Only one PHY on kumeran bus */
   11813 		return -1;
   11814 
   11815 	rv = sc->phy.acquire(sc);
   11816 	if (rv != 0) {
   11817 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11818 		return rv;
   11819 	}
   11820 
   11821 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11822 		page_select = GG82563_PHY_PAGE_SELECT;
   11823 	else {
   11824 		/*
   11825 		 * Use Alternative Page Select register to access registers
   11826 		 * 30 and 31.
   11827 		 */
   11828 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11829 	}
   11830 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11831 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11832 		goto out;
   11833 
   11834 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11835 		/*
    11836 		 * Wait an extra 200us to work around a bug in the ready
    11837 		 * bit of the MDIC register.
   11838 		 */
   11839 		delay(200);
   11840 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11841 		if ((rv != 0) || (temp2 != temp)) {
   11842 			device_printf(dev, "%s failed\n", __func__);
   11843 			rv = -1;
   11844 			goto out;
   11845 		}
   11846 		delay(200);
   11847 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11848 		delay(200);
   11849 	} else
   11850 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11851 
   11852 out:
   11853 	sc->phy.release(sc);
   11854 	return rv;
   11855 }
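
/*
 * Page-select sketch for the GG82563 accessors above: the page number
 * is carried in reg above GG82563_PAGE_SHIFT and the in-page offset in
 * the low bits (reg & MII_ADDRMASK). Offsets below GG82563_MIN_ALT_REG
 * go through the normal page-select register, while offsets 30 and 31
 * must be selected through the alternate register, so (assuming
 * GG82563_MIN_ALT_REG == 30):
 *
 *	in-page offset 29 -> GG82563_PHY_PAGE_SELECT
 *	in-page offset 30 -> GG82563_PHY_PAGE_SELECT_ALT
 */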
   11856 
   11857 /*
   11858  * wm_gmii_bm_readreg:	[mii interface function]
   11859  *
    11860  *	Read a PHY register on the BM PHY (82567, 82574 and 82583).
   11861  * This could be handled by the PHY layer if we didn't have to lock the
   11862  * resource ...
   11863  */
   11864 static int
   11865 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11866 {
   11867 	struct wm_softc *sc = device_private(dev);
   11868 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11869 	int rv;
   11870 
   11871 	rv = sc->phy.acquire(sc);
   11872 	if (rv != 0) {
   11873 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11874 		return rv;
   11875 	}
   11876 
   11877 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11878 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11879 		    || (reg == 31)) ? 1 : phy;
   11880 	/* Page 800 works differently than the rest so it has its own func */
   11881 	if (page == BM_WUC_PAGE) {
   11882 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11883 		goto release;
   11884 	}
   11885 
   11886 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11887 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11888 		    && (sc->sc_type != WM_T_82583))
   11889 			rv = wm_gmii_mdic_writereg(dev, phy,
   11890 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11891 		else
   11892 			rv = wm_gmii_mdic_writereg(dev, phy,
   11893 			    BME1000_PHY_PAGE_SELECT, page);
   11894 		if (rv != 0)
   11895 			goto release;
   11896 	}
   11897 
   11898 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11899 
   11900 release:
   11901 	sc->phy.release(sc);
   11902 	return rv;
   11903 }
   11904 
   11905 /*
   11906  * wm_gmii_bm_writereg:	[mii interface function]
   11907  *
    11908  *	Write a PHY register on the BM PHY (82567, 82574 and 82583).
   11909  * This could be handled by the PHY layer if we didn't have to lock the
   11910  * resource ...
   11911  */
   11912 static int
   11913 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11914 {
   11915 	struct wm_softc *sc = device_private(dev);
   11916 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11917 	int rv;
   11918 
   11919 	rv = sc->phy.acquire(sc);
   11920 	if (rv != 0) {
   11921 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11922 		return rv;
   11923 	}
   11924 
   11925 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11926 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11927 		    || (reg == 31)) ? 1 : phy;
   11928 	/* Page 800 works differently than the rest so it has its own func */
   11929 	if (page == BM_WUC_PAGE) {
   11930 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11931 		goto release;
   11932 	}
   11933 
   11934 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11935 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11936 		    && (sc->sc_type != WM_T_82583))
   11937 			rv = wm_gmii_mdic_writereg(dev, phy,
   11938 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11939 		else
   11940 			rv = wm_gmii_mdic_writereg(dev, phy,
   11941 			    BME1000_PHY_PAGE_SELECT, page);
   11942 		if (rv != 0)
   11943 			goto release;
   11944 	}
   11945 
   11946 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11947 
   11948 release:
   11949 	sc->phy.release(sc);
   11950 	return rv;
   11951 }
   11952 
   11953 /*
   11954  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11955  *  @dev: pointer to the HW structure
   11956  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11957  *
   11958  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11959  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11960  */
   11961 static int
   11962 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11963 {
   11964 #ifdef WM_DEBUG
   11965 	struct wm_softc *sc = device_private(dev);
   11966 #endif
   11967 	uint16_t temp;
   11968 	int rv;
   11969 
   11970 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11971 		device_xname(dev), __func__));
   11972 
   11973 	if (!phy_regp)
   11974 		return -1;
   11975 
   11976 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11977 
   11978 	/* Select Port Control Registers page */
   11979 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11980 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11981 	if (rv != 0)
   11982 		return rv;
   11983 
   11984 	/* Read WUCE and save it */
   11985 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11986 	if (rv != 0)
   11987 		return rv;
   11988 
   11989 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11990 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11991 	 */
   11992 	temp = *phy_regp;
   11993 	temp |= BM_WUC_ENABLE_BIT;
   11994 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11995 
   11996 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11997 		return rv;
   11998 
    11999 	/* Select the Host Wakeup Registers page - the caller is now
    12000 	 * able to access registers on the Wakeup registers page.
    12001 	 */
   12002 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12003 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   12004 }
   12005 
   12006 /*
   12007  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   12008  *  @dev: pointer to the HW structure
   12009  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   12010  *
   12011  *  Restore BM_WUC_ENABLE_REG to its original value.
   12012  *
   12013  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   12014  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   12015  *  caller.
   12016  */
   12017 static int
   12018 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   12019 {
   12020 #ifdef WM_DEBUG
   12021 	struct wm_softc *sc = device_private(dev);
   12022 #endif
   12023 
   12024 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   12025 		device_xname(dev), __func__));
   12026 
   12027 	if (!phy_regp)
   12028 		return -1;
   12029 
   12030 	/* Select Port Control Registers page */
   12031 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12032 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   12033 
   12034 	/* Restore 769.17 to its original value */
   12035 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   12036 
   12037 	return 0;
   12038 }
   12039 
   12040 /*
   12041  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    12042  *  @dev: pointer to the HW structure
   12043  *  @offset: register offset to be read or written
   12044  *  @val: pointer to the data to read or write
   12045  *  @rd: determines if operation is read or write
   12046  *  @page_set: BM_WUC_PAGE already set and access enabled
   12047  *
    12048  *  Read the PHY register at offset and store the retrieved information in
    12049  *  val, or write val to the PHY register at offset.  Note the procedure
    12050  *  for accessing the PHY wakeup registers differs from that for the other
    12051  *  PHY registers. It works as follows:
    12052  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    12053  *  2) Set page to 800 for host (801 for the manageability firmware)
   12054  *  3) Write the address using the address opcode (0x11)
   12055  *  4) Read or write the data using the data opcode (0x12)
   12056  *  5) Restore 769.17.2 to its original value
   12057  *
   12058  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   12059  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   12060  *
   12061  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   12062  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   12063  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   12064  */
   12065 static int
    12066 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   12067 	bool page_set)
   12068 {
   12069 	struct wm_softc *sc = device_private(dev);
   12070 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   12071 	uint16_t page = BM_PHY_REG_PAGE(offset);
   12072 	uint16_t wuce;
   12073 	int rv = 0;
   12074 
   12075 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12076 		device_xname(dev), __func__));
   12077 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   12078 	if ((sc->sc_type == WM_T_PCH)
   12079 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   12080 		device_printf(dev,
   12081 		    "Attempting to access page %d while gig enabled.\n", page);
   12082 	}
   12083 
   12084 	if (!page_set) {
   12085 		/* Enable access to PHY wakeup registers */
   12086 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   12087 		if (rv != 0) {
   12088 			device_printf(dev,
   12089 			    "%s: Could not enable PHY wakeup reg access\n",
   12090 			    __func__);
   12091 			return rv;
   12092 		}
   12093 	}
   12094 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   12095 		device_xname(sc->sc_dev), __func__, page, regnum));
   12096 
   12097 	/*
    12098 	 * Steps 3) and 4): access the PHY wakeup register.
    12099 	 * See the numbered procedure in the function comment above.
   12100 	 */
   12101 
   12102 	/* Write the Wakeup register page offset value using opcode 0x11 */
   12103 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   12104 	if (rv != 0)
   12105 		return rv;
   12106 
   12107 	if (rd) {
   12108 		/* Read the Wakeup register page value using opcode 0x12 */
   12109 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   12110 	} else {
   12111 		/* Write the Wakeup register page value using opcode 0x12 */
   12112 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   12113 	}
   12114 	if (rv != 0)
   12115 		return rv;
   12116 
   12117 	if (!page_set)
   12118 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   12119 
   12120 	return rv;
   12121 }
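
/*
 * Usage sketch (illustrative only, semaphore already held): reading
 * wakeup register 1 on page 800 means passing an offset for which
 * BM_PHY_REG_PAGE(offset) yields BM_WUC_PAGE (800) and
 * BM_PHY_REG_NUM(offset) yields 1:
 *
 *	uint16_t wuc;
 *	int rv;
 *
 *	rv = wm_access_phy_wakeup_reg_bm(dev, offset, &wuc, true, false);
 *
 * With page_set == false, as here, the enable/disable helpers above
 * are run automatically around the access.
 */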
   12122 
   12123 /*
   12124  * wm_gmii_hv_readreg:	[mii interface function]
   12125  *
    12126  *	Read a PHY register on the HV PHY (PCH and newer).
   12127  * This could be handled by the PHY layer if we didn't have to lock the
   12128  * resource ...
   12129  */
   12130 static int
   12131 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12132 {
   12133 	struct wm_softc *sc = device_private(dev);
   12134 	int rv;
   12135 
   12136 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12137 		device_xname(dev), __func__));
   12138 
   12139 	rv = sc->phy.acquire(sc);
   12140 	if (rv != 0) {
   12141 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12142 		return rv;
   12143 	}
   12144 
   12145 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   12146 	sc->phy.release(sc);
   12147 	return rv;
   12148 }
   12149 
   12150 static int
   12151 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12152 {
   12153 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12154 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12155 	int rv;
   12156 
   12157 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12158 
   12159 	/* Page 800 works differently than the rest so it has its own func */
   12160 	if (page == BM_WUC_PAGE)
   12161 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   12162 
   12163 	/*
    12164 	 * Pages below 768 (other than page 0) would need separate
    12165 	 * handling, which is not implemented, so fail the access.
   12166 	 */
   12167 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12168 		device_printf(dev, "gmii_hv_readreg!!!\n");
   12169 		return -1;
   12170 	}
   12171 
   12172 	/*
   12173 	 * XXX I21[789] documents say that the SMBus Address register is at
   12174 	 * PHY address 01, Page 0 (not 768), Register 26.
   12175 	 */
   12176 	if (page == HV_INTC_FC_PAGE_START)
   12177 		page = 0;
   12178 
   12179 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12180 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   12181 		    page << BME1000_PAGE_SHIFT);
   12182 		if (rv != 0)
   12183 			return rv;
   12184 	}
   12185 
   12186 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   12187 }
   12188 
   12189 /*
   12190  * wm_gmii_hv_writereg:	[mii interface function]
   12191  *
    12192  *	Write a PHY register on the HV PHY (PCH and newer).
   12193  * This could be handled by the PHY layer if we didn't have to lock the
   12194  * resource ...
   12195  */
   12196 static int
   12197 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   12198 {
   12199 	struct wm_softc *sc = device_private(dev);
   12200 	int rv;
   12201 
   12202 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   12203 		device_xname(dev), __func__));
   12204 
   12205 	rv = sc->phy.acquire(sc);
   12206 	if (rv != 0) {
   12207 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12208 		return rv;
   12209 	}
   12210 
   12211 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   12212 	sc->phy.release(sc);
   12213 
   12214 	return rv;
   12215 }
   12216 
   12217 static int
   12218 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12219 {
   12220 	struct wm_softc *sc = device_private(dev);
   12221 	uint16_t page = BM_PHY_REG_PAGE(reg);
   12222 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   12223 	int rv;
   12224 
   12225 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   12226 
   12227 	/* Page 800 works differently than the rest so it has its own func */
   12228 	if (page == BM_WUC_PAGE)
   12229 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   12230 		    false);
   12231 
   12232 	/*
    12233 	 * Pages below 768 (other than page 0) would need separate
    12234 	 * handling, which is not implemented, so fail the access.
   12235 	 */
   12236 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   12237 		device_printf(dev, "gmii_hv_writereg!!!\n");
   12238 		return -1;
   12239 	}
   12240 
   12241 	{
   12242 		/*
   12243 		 * XXX I21[789] documents say that the SMBus Address register
   12244 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   12245 		 */
   12246 		if (page == HV_INTC_FC_PAGE_START)
   12247 			page = 0;
   12248 
   12249 		/*
   12250 		 * XXX Workaround MDIO accesses being disabled after entering
   12251 		 * IEEE Power Down (whenever bit 11 of the PHY control
   12252 		 * register is set)
   12253 		 */
   12254 		if (sc->sc_phytype == WMPHY_82578) {
   12255 			struct mii_softc *child;
   12256 
   12257 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12258 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   12259 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   12260 			    && ((val & (1 << 11)) != 0)) {
   12261 				device_printf(dev, "XXX need workaround\n");
   12262 			}
   12263 		}
   12264 
   12265 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   12266 			rv = wm_gmii_mdic_writereg(dev, 1,
   12267 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   12268 			if (rv != 0)
   12269 				return rv;
   12270 		}
   12271 	}
   12272 
   12273 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   12274 }
   12275 
   12276 /*
   12277  * wm_gmii_82580_readreg:	[mii interface function]
   12278  *
   12279  *	Read a PHY register on the 82580 and I350.
   12280  * This could be handled by the PHY layer if we didn't have to lock the
   12281  * resource ...
   12282  */
   12283 static int
   12284 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12285 {
   12286 	struct wm_softc *sc = device_private(dev);
   12287 	int rv;
   12288 
   12289 	rv = sc->phy.acquire(sc);
   12290 	if (rv != 0) {
   12291 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12292 		return rv;
   12293 	}
   12294 
   12295 #ifdef DIAGNOSTIC
   12296 	if (reg > MII_ADDRMASK) {
   12297 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12298 		    __func__, sc->sc_phytype, reg);
   12299 		reg &= MII_ADDRMASK;
   12300 	}
   12301 #endif
   12302 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   12303 
   12304 	sc->phy.release(sc);
   12305 	return rv;
   12306 }
   12307 
   12308 /*
   12309  * wm_gmii_82580_writereg:	[mii interface function]
   12310  *
   12311  *	Write a PHY register on the 82580 and I350.
   12312  * This could be handled by the PHY layer if we didn't have to lock the
   12313  * resource ...
   12314  */
   12315 static int
   12316 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   12317 {
   12318 	struct wm_softc *sc = device_private(dev);
   12319 	int rv;
   12320 
   12321 	rv = sc->phy.acquire(sc);
   12322 	if (rv != 0) {
   12323 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12324 		return rv;
   12325 	}
   12326 
   12327 #ifdef DIAGNOSTIC
   12328 	if (reg > MII_ADDRMASK) {
   12329 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   12330 		    __func__, sc->sc_phytype, reg);
   12331 		reg &= MII_ADDRMASK;
   12332 	}
   12333 #endif
   12334 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   12335 
   12336 	sc->phy.release(sc);
   12337 	return rv;
   12338 }
   12339 
   12340 /*
   12341  * wm_gmii_gs40g_readreg:	[mii interface function]
   12342  *
    12343  *	Read a PHY register on the I210 and I211.
   12344  * This could be handled by the PHY layer if we didn't have to lock the
   12345  * resource ...
   12346  */
   12347 static int
   12348 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12349 {
   12350 	struct wm_softc *sc = device_private(dev);
   12351 	int page, offset;
   12352 	int rv;
   12353 
   12354 	/* Acquire semaphore */
   12355 	rv = sc->phy.acquire(sc);
   12356 	if (rv != 0) {
   12357 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12358 		return rv;
   12359 	}
   12360 
   12361 	/* Page select */
   12362 	page = reg >> GS40G_PAGE_SHIFT;
   12363 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12364 	if (rv != 0)
   12365 		goto release;
   12366 
   12367 	/* Read reg */
   12368 	offset = reg & GS40G_OFFSET_MASK;
   12369 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   12370 
   12371 release:
   12372 	sc->phy.release(sc);
   12373 	return rv;
   12374 }
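
/*
 * Encoding sketch for the GS40G accessors: the page is carried above
 * GS40G_PAGE_SHIFT and the in-page offset below it, so an access to
 * offset 2 (MII_PHYIDR1) on page 0 arrives as
 *
 *	reg == (0 << GS40G_PAGE_SHIFT) | 2
 *
 * and becomes a page-select write of 0 followed by an MDIC access of
 * offset 2 (reg & GS40G_OFFSET_MASK).
 */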
   12375 
   12376 /*
   12377  * wm_gmii_gs40g_writereg:	[mii interface function]
   12378  *
   12379  *	Write a PHY register on the I210 and I211.
   12380  * This could be handled by the PHY layer if we didn't have to lock the
   12381  * resource ...
   12382  */
   12383 static int
   12384 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   12385 {
   12386 	struct wm_softc *sc = device_private(dev);
   12387 	uint16_t page;
   12388 	int offset, rv;
   12389 
   12390 	/* Acquire semaphore */
   12391 	rv = sc->phy.acquire(sc);
   12392 	if (rv != 0) {
   12393 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12394 		return rv;
   12395 	}
   12396 
   12397 	/* Page select */
   12398 	page = reg >> GS40G_PAGE_SHIFT;
   12399 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   12400 	if (rv != 0)
   12401 		goto release;
   12402 
   12403 	/* Write reg */
   12404 	offset = reg & GS40G_OFFSET_MASK;
   12405 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   12406 
   12407 release:
   12408 	/* Release semaphore */
   12409 	sc->phy.release(sc);
   12410 	return rv;
   12411 }
   12412 
   12413 /*
   12414  * wm_gmii_statchg:	[mii interface function]
   12415  *
   12416  *	Callback from MII layer when media changes.
   12417  */
   12418 static void
   12419 wm_gmii_statchg(struct ifnet *ifp)
   12420 {
   12421 	struct wm_softc *sc = ifp->if_softc;
   12422 	struct mii_data *mii = &sc->sc_mii;
   12423 
   12424 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12425 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12426 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12427 
   12428 	/* Get flow control negotiation result. */
   12429 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12430 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12431 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12432 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12433 	}
   12434 
   12435 	if (sc->sc_flowflags & IFM_FLOW) {
   12436 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12437 			sc->sc_ctrl |= CTRL_TFCE;
   12438 			sc->sc_fcrtl |= FCRTL_XONE;
   12439 		}
   12440 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12441 			sc->sc_ctrl |= CTRL_RFCE;
   12442 	}
   12443 
   12444 	if (mii->mii_media_active & IFM_FDX) {
   12445 		DPRINTF(sc, WM_DEBUG_LINK,
   12446 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12447 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12448 	} else {
   12449 		DPRINTF(sc, WM_DEBUG_LINK,
   12450 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12451 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12452 	}
   12453 
   12454 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12455 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12456 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   12457 						 : WMREG_FCRTL, sc->sc_fcrtl);
   12458 	if (sc->sc_type == WM_T_80003) {
   12459 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12460 		case IFM_1000_T:
   12461 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12462 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   12463 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   12464 			break;
   12465 		default:
   12466 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12467 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   12468 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   12469 			break;
   12470 		}
   12471 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12472 	}
   12473 }
   12474 
   12475 /* kumeran related (80003, ICH* and PCH*) */
   12476 
   12477 /*
   12478  * wm_kmrn_readreg:
   12479  *
   12480  *	Read a kumeran register
   12481  */
   12482 static int
   12483 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12484 {
   12485 	int rv;
   12486 
   12487 	if (sc->sc_type == WM_T_80003)
   12488 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12489 	else
   12490 		rv = sc->phy.acquire(sc);
   12491 	if (rv != 0) {
   12492 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12493 		    __func__);
   12494 		return rv;
   12495 	}
   12496 
   12497 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12498 
   12499 	if (sc->sc_type == WM_T_80003)
   12500 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12501 	else
   12502 		sc->phy.release(sc);
   12503 
   12504 	return rv;
   12505 }
   12506 
   12507 static int
   12508 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12509 {
   12510 
   12511 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12512 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12513 	    KUMCTRLSTA_REN);
   12514 	CSR_WRITE_FLUSH(sc);
   12515 	delay(2);
   12516 
   12517 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12518 
   12519 	return 0;
   12520 }
   12521 
   12522 /*
   12523  * wm_kmrn_writereg:
   12524  *
   12525  *	Write a kumeran register
   12526  */
   12527 static int
   12528 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12529 {
   12530 	int rv;
   12531 
   12532 	if (sc->sc_type == WM_T_80003)
   12533 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12534 	else
   12535 		rv = sc->phy.acquire(sc);
   12536 	if (rv != 0) {
   12537 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12538 		    __func__);
   12539 		return rv;
   12540 	}
   12541 
   12542 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12543 
   12544 	if (sc->sc_type == WM_T_80003)
   12545 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12546 	else
   12547 		sc->phy.release(sc);
   12548 
   12549 	return rv;
   12550 }
   12551 
   12552 static int
   12553 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12554 {
   12555 
   12556 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12557 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12558 
   12559 	return 0;
   12560 }
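
          /*
           * Example (a sketch, not a fixed recipe): a Kumeran access goes
           * through the wrappers above, which take the SWFW_MAC_CSR_SM
           * semaphore on 80003 and the PHY lock on ICH/PCH parts, e.g.:
           *
           *	uint16_t hdctl;
           *	rv = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &hdctl);
           *
           * A read sets KUMCTRLSTA_REN and picks up the value after a short
           * delay; a write simply ORs the value into the offset word.
           */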
   12561 
   12562 /*
   12563  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12564  * This access method is different from IEEE MMD.
   12565  */
   12566 static int
   12567 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12568 {
   12569 	struct wm_softc *sc = device_private(dev);
   12570 	int rv;
   12571 
   12572 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12573 	if (rv != 0)
   12574 		return rv;
   12575 
   12576 	if (rd)
   12577 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12578 	else
   12579 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12580 	return rv;
   12581 }
   12582 
   12583 static int
   12584 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12585 {
   12586 
   12587 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12588 }
   12589 
   12590 static int
   12591 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12592 {
   12593 
   12594 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12595 }
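
          /*
           * Example (sketch; EMI_REG_OFFSET is a hypothetical name): with
           * the PHY lock held, a read such as
           *
           *	uint16_t data;
           *	rv = wm_read_emi_reg_locked(dev, EMI_REG_OFFSET, &data);
           *
           * performs the two-step indirect access above: the offset is
           * written to I82579_EMI_ADDR on PHY page 2, then the value is
           * transferred through I82579_EMI_DATA.
           */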
   12596 
   12597 /* SGMII related */
   12598 
   12599 /*
   12600  * wm_sgmii_uses_mdio
   12601  *
   12602  * Check whether the transaction is to the internal PHY or the external
   12603  * MDIO interface. Return true if it's MDIO.
   12604  */
   12605 static bool
   12606 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12607 {
   12608 	uint32_t reg;
   12609 	bool ismdio = false;
   12610 
   12611 	switch (sc->sc_type) {
   12612 	case WM_T_82575:
   12613 	case WM_T_82576:
   12614 		reg = CSR_READ(sc, WMREG_MDIC);
   12615 		ismdio = ((reg & MDIC_DEST) != 0);
   12616 		break;
   12617 	case WM_T_82580:
   12618 	case WM_T_I350:
   12619 	case WM_T_I354:
   12620 	case WM_T_I210:
   12621 	case WM_T_I211:
   12622 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12623 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12624 		break;
   12625 	default:
   12626 		break;
   12627 	}
   12628 
   12629 	return ismdio;
   12630 }
   12631 
   12632 /* Setup internal SGMII PHY for SFP */
   12633 static void
   12634 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12635 {
   12636 	uint16_t id1, id2, phyreg;
   12637 	int i, rv;
   12638 
   12639 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12640 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12641 		return;
   12642 
   12643 	for (i = 0; i < MII_NPHY; i++) {
   12644 		sc->phy.no_errprint = true;
   12645 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12646 		if (rv != 0)
   12647 			continue;
   12648 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12649 		if (rv != 0)
   12650 			continue;
   12651 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12652 			continue;
   12653 		sc->phy.no_errprint = false;
   12654 
   12655 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12656 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12657 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12658 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12659 		break;
   12660 	}
   12662 }
   12663 
   12664 /*
   12665  * wm_sgmii_readreg:	[mii interface function]
   12666  *
   12667  *	Read a PHY register on the SGMII
   12668  * This could be handled by the PHY layer if we didn't have to lock the
   12669  * resource ...
   12670  */
   12671 static int
   12672 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12673 {
   12674 	struct wm_softc *sc = device_private(dev);
   12675 	int rv;
   12676 
   12677 	rv = sc->phy.acquire(sc);
   12678 	if (rv != 0) {
   12679 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12680 		return rv;
   12681 	}
   12682 
   12683 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12684 
   12685 	sc->phy.release(sc);
   12686 	return rv;
   12687 }
   12688 
   12689 static int
   12690 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12691 {
   12692 	struct wm_softc *sc = device_private(dev);
   12693 	uint32_t i2ccmd;
   12694 	int i, rv = 0;
   12695 
   12696 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12697 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12698 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12699 
   12700 	/* Poll the ready bit */
   12701 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12702 		delay(50);
   12703 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12704 		if (i2ccmd & I2CCMD_READY)
   12705 			break;
   12706 	}
   12707 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12708 		device_printf(dev, "I2CCMD Read did not complete\n");
   12709 		rv = ETIMEDOUT;
   12710 	}
   12711 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12712 		if (!sc->phy.no_errprint)
   12713 			device_printf(dev, "I2CCMD Error bit set\n");
   12714 		rv = EIO;
   12715 	}
   12716 
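          	/*
          	 * The I2C interface returns the two data bytes swapped;
          	 * swap them back to host order (the write side below does
          	 * the mirror-image swap).
          	 */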
   12717 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12718 
   12719 	return rv;
   12720 }
   12721 
   12722 /*
   12723  * wm_sgmii_writereg:	[mii interface function]
   12724  *
   12725  *	Write a PHY register on the SGMII.
   12726  * This could be handled by the PHY layer if we didn't have to lock the
   12727  * resource ...
   12728  */
   12729 static int
   12730 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12731 {
   12732 	struct wm_softc *sc = device_private(dev);
   12733 	int rv;
   12734 
   12735 	rv = sc->phy.acquire(sc);
   12736 	if (rv != 0) {
   12737 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12738 		return rv;
   12739 	}
   12740 
   12741 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12742 
   12743 	sc->phy.release(sc);
   12744 
   12745 	return rv;
   12746 }
   12747 
   12748 static int
   12749 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12750 {
   12751 	struct wm_softc *sc = device_private(dev);
   12752 	uint32_t i2ccmd;
   12753 	uint16_t swapdata;
   12754 	int rv = 0;
   12755 	int i;
   12756 
   12757 	/* Swap the data bytes for the I2C interface */
   12758 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12759 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12760 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12761 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12762 
   12763 	/* Poll the ready bit */
   12764 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12765 		delay(50);
   12766 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12767 		if (i2ccmd & I2CCMD_READY)
   12768 			break;
   12769 	}
   12770 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12771 		device_printf(dev, "I2CCMD Write did not complete\n");
   12772 		rv = ETIMEDOUT;
   12773 	}
   12774 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12775 		device_printf(dev, "I2CCMD Error bit set\n");
   12776 		rv = EIO;
   12777 	}
   12778 
   12779 	return rv;
   12780 }
   12781 
   12782 /* TBI related */
   12783 
   12784 static bool
   12785 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12786 {
   12787 	bool sig;
   12788 
   12789 	sig = ctrl & CTRL_SWDPIN(1);
   12790 
   12791 	/*
   12792 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12793 	 * detect a signal, 1 if they don't.
   12794 	 */
   12795 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12796 		sig = !sig;
   12797 
   12798 	return sig;
   12799 }
   12800 
   12801 /*
   12802  * wm_tbi_mediainit:
   12803  *
   12804  *	Initialize media for use on 1000BASE-X devices.
   12805  */
   12806 static void
   12807 wm_tbi_mediainit(struct wm_softc *sc)
   12808 {
   12809 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12810 	const char *sep = "";
   12811 
   12812 	if (sc->sc_type < WM_T_82543)
   12813 		sc->sc_tipg = TIPG_WM_DFLT;
   12814 	else
   12815 		sc->sc_tipg = TIPG_LG_DFLT;
   12816 
   12817 	sc->sc_tbi_serdes_anegticks = 5;
   12818 
   12819 	/* Initialize our media structures */
   12820 	sc->sc_mii.mii_ifp = ifp;
   12821 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12822 
   12823 	ifp->if_baudrate = IF_Gbps(1);
   12824 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12825 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12826 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12827 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12828 		    sc->sc_core_lock);
   12829 	} else {
   12830 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12831 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12832 	}
   12833 
   12834 	/*
   12835 	 * SWD Pins:
   12836 	 *
   12837 	 *	0 = Link LED (output)
   12838 	 *	1 = Loss Of Signal (input)
   12839 	 */
   12840 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12841 
   12842 	/* XXX Perhaps this is only for TBI */
   12843 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12844 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12845 
   12846 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12847 		sc->sc_ctrl &= ~CTRL_LRST;
   12848 
   12849 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12850 
   12851 #define	ADD(ss, mm, dd)							  \
   12852 do {									  \
   12853 	aprint_normal("%s%s", sep, ss);					  \
   12854 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12855 	sep = ", ";							  \
   12856 } while (/*CONSTCOND*/0)
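
          /*
           * For example, ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD) prints the
           * media name (comma-separated after the first one) and registers
           * the matching ifmedia entry, with dd stored as ifm_data.
           */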
   12857 
   12858 	aprint_normal_dev(sc->sc_dev, "");
   12859 
   12860 	if (sc->sc_type == WM_T_I354) {
   12861 		uint32_t status;
   12862 
   12863 		status = CSR_READ(sc, WMREG_STATUS);
   12864 		if (((status & STATUS_2P5_SKU) != 0)
   12865 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12866 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12867 		} else
   12868 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12869 	} else if (sc->sc_type == WM_T_82545) {
   12870 		/* Only 82545 is LX (XXX except SFP) */
   12871 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12872 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12873 	} else if (sc->sc_sfptype != 0) {
   12874 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12875 		switch (sc->sc_sfptype) {
   12876 		default:
   12877 		case SFF_SFP_ETH_FLAGS_1000SX:
   12878 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12879 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12880 			break;
   12881 		case SFF_SFP_ETH_FLAGS_1000LX:
   12882 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12883 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12884 			break;
   12885 		case SFF_SFP_ETH_FLAGS_1000CX:
   12886 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12887 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12888 			break;
   12889 		case SFF_SFP_ETH_FLAGS_1000T:
   12890 			ADD("1000baseT", IFM_1000_T, 0);
   12891 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12892 			break;
   12893 		case SFF_SFP_ETH_FLAGS_100FX:
   12894 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12895 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12896 			break;
   12897 		}
   12898 	} else {
   12899 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12900 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12901 	}
   12902 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12903 	aprint_normal("\n");
   12904 
   12905 #undef ADD
   12906 
   12907 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12908 }
   12909 
   12910 /*
   12911  * wm_tbi_mediachange:	[ifmedia interface function]
   12912  *
   12913  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12914  */
   12915 static int
   12916 wm_tbi_mediachange(struct ifnet *ifp)
   12917 {
   12918 	struct wm_softc *sc = ifp->if_softc;
   12919 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12920 	uint32_t status, ctrl;
   12921 	bool signal;
   12922 	int i;
   12923 
   12924 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12925 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12926 		/* XXX need some work for >= 82571 and < 82575 */
   12927 		if (sc->sc_type < WM_T_82575)
   12928 			return 0;
   12929 	}
   12930 
   12931 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12932 	    || (sc->sc_type >= WM_T_82575))
   12933 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12934 
   12935 	sc->sc_ctrl &= ~CTRL_LRST;
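          	/*
          	 * TXCW carries the 802.3z autonegotiation base page:
          	 * duplex abilities here, pause bits just below.
          	 */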
   12936 	sc->sc_txcw = TXCW_ANE;
   12937 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12938 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12939 	else if (ife->ifm_media & IFM_FDX)
   12940 		sc->sc_txcw |= TXCW_FD;
   12941 	else
   12942 		sc->sc_txcw |= TXCW_HD;
   12943 
   12944 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12945 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12946 
    12947 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12948 		device_xname(sc->sc_dev), sc->sc_txcw));
   12949 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12950 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12951 	CSR_WRITE_FLUSH(sc);
   12952 	delay(1000);
   12953 
   12954 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12955 	signal = wm_tbi_havesignal(sc, ctrl);
   12956 
   12957 	DPRINTF(sc, WM_DEBUG_LINK,
   12958 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
   12959 
   12960 	if (signal) {
   12961 		/* Have signal; wait for the link to come up. */
   12962 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12963 			delay(10000);
   12964 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12965 				break;
   12966 		}
   12967 
   12968 		DPRINTF(sc, WM_DEBUG_LINK,
   12969 		    ("%s: i = %d after waiting for link\n",
   12970 			device_xname(sc->sc_dev), i));
   12971 
   12972 		status = CSR_READ(sc, WMREG_STATUS);
   12973 		DPRINTF(sc, WM_DEBUG_LINK,
   12974 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   12975 			__PRIxBIT "\n",
   12976 			device_xname(sc->sc_dev), status, STATUS_LU));
   12977 		if (status & STATUS_LU) {
   12978 			/* Link is up. */
   12979 			DPRINTF(sc, WM_DEBUG_LINK,
   12980 			    ("%s: LINK: set media -> link up %s\n",
   12981 				device_xname(sc->sc_dev),
   12982 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12983 
   12984 			/*
    12985 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    12986 			 * automatically, so re-read it into sc->sc_ctrl.
   12987 			 */
   12988 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12989 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12990 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12991 			if (status & STATUS_FD)
   12992 				sc->sc_tctl |=
   12993 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12994 			else
   12995 				sc->sc_tctl |=
   12996 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12997 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12998 				sc->sc_fcrtl |= FCRTL_XONE;
   12999 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   13000 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   13001 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   13002 			sc->sc_tbi_linkup = 1;
   13003 		} else {
   13004 			if (i == WM_LINKUP_TIMEOUT)
   13005 				wm_check_for_link(sc);
   13006 			/* Link is down. */
   13007 			DPRINTF(sc, WM_DEBUG_LINK,
   13008 			    ("%s: LINK: set media -> link down\n",
   13009 				device_xname(sc->sc_dev)));
   13010 			sc->sc_tbi_linkup = 0;
   13011 		}
   13012 	} else {
   13013 		DPRINTF(sc, WM_DEBUG_LINK,
   13014 		    ("%s: LINK: set media -> no signal\n",
   13015 			device_xname(sc->sc_dev)));
   13016 		sc->sc_tbi_linkup = 0;
   13017 	}
   13018 
   13019 	wm_tbi_serdes_set_linkled(sc);
   13020 
   13021 	return 0;
   13022 }
   13023 
   13024 /*
   13025  * wm_tbi_mediastatus:	[ifmedia interface function]
   13026  *
   13027  *	Get the current interface media status on a 1000BASE-X device.
   13028  */
   13029 static void
   13030 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13031 {
   13032 	struct wm_softc *sc = ifp->if_softc;
   13033 	uint32_t ctrl, status;
   13034 
   13035 	ifmr->ifm_status = IFM_AVALID;
   13036 	ifmr->ifm_active = IFM_ETHER;
   13037 
   13038 	status = CSR_READ(sc, WMREG_STATUS);
   13039 	if ((status & STATUS_LU) == 0) {
   13040 		ifmr->ifm_active |= IFM_NONE;
   13041 		return;
   13042 	}
   13043 
   13044 	ifmr->ifm_status |= IFM_ACTIVE;
   13045 	/* Only 82545 is LX */
   13046 	if (sc->sc_type == WM_T_82545)
   13047 		ifmr->ifm_active |= IFM_1000_LX;
   13048 	else
   13049 		ifmr->ifm_active |= IFM_1000_SX;
   13050 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   13051 		ifmr->ifm_active |= IFM_FDX;
   13052 	else
   13053 		ifmr->ifm_active |= IFM_HDX;
   13054 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13055 	if (ctrl & CTRL_RFCE)
   13056 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   13057 	if (ctrl & CTRL_TFCE)
   13058 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   13059 }
   13060 
   13061 /* XXX TBI only */
   13062 static int
   13063 wm_check_for_link(struct wm_softc *sc)
   13064 {
   13065 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   13066 	uint32_t rxcw;
   13067 	uint32_t ctrl;
   13068 	uint32_t status;
   13069 	bool signal;
   13070 
   13071 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   13072 		device_xname(sc->sc_dev), __func__));
   13073 
   13074 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   13075 		/* XXX need some work for >= 82571 */
   13076 		if (sc->sc_type >= WM_T_82571) {
   13077 			sc->sc_tbi_linkup = 1;
   13078 			return 0;
   13079 		}
   13080 	}
   13081 
   13082 	rxcw = CSR_READ(sc, WMREG_RXCW);
   13083 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13084 	status = CSR_READ(sc, WMREG_STATUS);
   13085 	signal = wm_tbi_havesignal(sc, ctrl);
   13086 
   13087 	DPRINTF(sc, WM_DEBUG_LINK,
   13088 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   13089 		device_xname(sc->sc_dev), __func__, signal,
   13090 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   13091 
   13092 	/*
   13093 	 * SWDPIN   LU RXCW
   13094 	 *	0    0	  0
   13095 	 *	0    0	  1	(should not happen)
   13096 	 *	0    1	  0	(should not happen)
   13097 	 *	0    1	  1	(should not happen)
   13098 	 *	1    0	  0	Disable autonego and force linkup
   13099 	 *	1    0	  1	got /C/ but not linkup yet
   13100 	 *	1    1	  0	(linkup)
   13101 	 *	1    1	  1	If IFM_AUTO, back to autonego
   13102 	 *
   13103 	 */
   13104 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   13105 		DPRINTF(sc, WM_DEBUG_LINK,
   13106 		    ("%s: %s: force linkup and fullduplex\n",
   13107 			device_xname(sc->sc_dev), __func__));
   13108 		sc->sc_tbi_linkup = 0;
   13109 		/* Disable auto-negotiation in the TXCW register */
   13110 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   13111 
   13112 		/*
   13113 		 * Force link-up and also force full-duplex.
   13114 		 *
    13115 		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
    13116 		 * so base sc->sc_ctrl on the value just read.
   13117 		 */
   13118 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   13119 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13120 	} else if (((status & STATUS_LU) != 0)
   13121 	    && ((rxcw & RXCW_C) != 0)
   13122 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   13123 		sc->sc_tbi_linkup = 1;
   13124 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   13125 			device_xname(sc->sc_dev), __func__));
   13126 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13127 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   13128 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    13129 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   13130 			device_xname(sc->sc_dev), __func__));
   13131 	} else {
   13132 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   13133 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   13134 			status));
   13135 	}
   13136 
   13137 	return 0;
   13138 }
   13139 
   13140 /*
   13141  * wm_tbi_tick:
   13142  *
   13143  *	Check the link on TBI devices.
   13144  *	This function acts as mii_tick().
   13145  */
   13146 static void
   13147 wm_tbi_tick(struct wm_softc *sc)
   13148 {
   13149 	struct mii_data *mii = &sc->sc_mii;
   13150 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13151 	uint32_t status;
   13152 
   13153 	KASSERT(WM_CORE_LOCKED(sc));
   13154 
   13155 	status = CSR_READ(sc, WMREG_STATUS);
   13156 
   13157 	/* XXX is this needed? */
   13158 	(void)CSR_READ(sc, WMREG_RXCW);
   13159 	(void)CSR_READ(sc, WMREG_CTRL);
   13160 
   13161 	/* set link status */
   13162 	if ((status & STATUS_LU) == 0) {
   13163 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   13164 			device_xname(sc->sc_dev)));
   13165 		sc->sc_tbi_linkup = 0;
   13166 	} else if (sc->sc_tbi_linkup == 0) {
   13167 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   13168 			device_xname(sc->sc_dev),
   13169 			(status & STATUS_FD) ? "FDX" : "HDX"));
   13170 		sc->sc_tbi_linkup = 1;
   13171 		sc->sc_tbi_serdes_ticks = 0;
   13172 	}
   13173 
   13174 	if ((sc->sc_if_flags & IFF_UP) == 0)
   13175 		goto setled;
   13176 
   13177 	if ((status & STATUS_LU) == 0) {
   13178 		sc->sc_tbi_linkup = 0;
   13179 		/* If the timer expired, retry autonegotiation */
   13180 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13181 		    && (++sc->sc_tbi_serdes_ticks
   13182 			>= sc->sc_tbi_serdes_anegticks)) {
   13183 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13184 				device_xname(sc->sc_dev), __func__));
   13185 			sc->sc_tbi_serdes_ticks = 0;
   13186 			/*
   13187 			 * Reset the link, and let autonegotiation do
   13188 			 * its thing
   13189 			 */
   13190 			sc->sc_ctrl |= CTRL_LRST;
   13191 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13192 			CSR_WRITE_FLUSH(sc);
   13193 			delay(1000);
   13194 			sc->sc_ctrl &= ~CTRL_LRST;
   13195 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13196 			CSR_WRITE_FLUSH(sc);
   13197 			delay(1000);
   13198 			CSR_WRITE(sc, WMREG_TXCW,
   13199 			    sc->sc_txcw & ~TXCW_ANE);
   13200 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   13201 		}
   13202 	}
   13203 
   13204 setled:
   13205 	wm_tbi_serdes_set_linkled(sc);
   13206 }
   13207 
   13208 /* SERDES related */
   13209 static void
   13210 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   13211 {
   13212 	uint32_t reg;
   13213 
   13214 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13215 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13216 		return;
   13217 
   13218 	/* Enable PCS to turn on link */
   13219 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   13220 	reg |= PCS_CFG_PCS_EN;
   13221 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   13222 
   13223 	/* Power up the laser */
   13224 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13225 	reg &= ~CTRL_EXT_SWDPIN(3);
   13226 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13227 
   13228 	/* Flush the write to verify completion */
   13229 	CSR_WRITE_FLUSH(sc);
   13230 	delay(1000);
   13231 }
   13232 
   13233 static int
   13234 wm_serdes_mediachange(struct ifnet *ifp)
   13235 {
   13236 	struct wm_softc *sc = ifp->if_softc;
   13237 	bool pcs_autoneg = true; /* XXX */
   13238 	uint32_t ctrl_ext, pcs_lctl, reg;
   13239 
   13240 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   13241 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   13242 		return 0;
   13243 
   13244 	/* XXX Currently, this function is not called on 8257[12] */
   13245 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   13246 	    || (sc->sc_type >= WM_T_82575))
   13247 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   13248 
   13249 	/* Power on the sfp cage if present */
   13250 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13251 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13252 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   13253 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13254 
   13255 	sc->sc_ctrl |= CTRL_SLU;
   13256 
   13257 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   13258 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   13259 
   13260 		reg = CSR_READ(sc, WMREG_CONNSW);
   13261 		reg |= CONNSW_ENRGSRC;
   13262 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   13263 	}
   13264 
   13265 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   13266 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   13267 	case CTRL_EXT_LINK_MODE_SGMII:
   13268 		/* SGMII mode lets the phy handle forcing speed/duplex */
   13269 		pcs_autoneg = true;
   13270 		/* Autoneg time out should be disabled for SGMII mode */
   13271 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   13272 		break;
   13273 	case CTRL_EXT_LINK_MODE_1000KX:
   13274 		pcs_autoneg = false;
   13275 		/* FALLTHROUGH */
   13276 	default:
   13277 		if ((sc->sc_type == WM_T_82575)
   13278 		    || (sc->sc_type == WM_T_82576)) {
   13279 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   13280 				pcs_autoneg = false;
   13281 		}
   13282 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   13283 		    | CTRL_FRCFDX;
   13284 
   13285 		/* Set speed of 1000/Full if speed/duplex is forced */
   13286 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   13287 	}
   13288 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   13289 
   13290 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   13291 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   13292 
   13293 	if (pcs_autoneg) {
   13294 		/* Set PCS register for autoneg */
   13295 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   13296 
   13297 		/* Disable force flow control for autoneg */
   13298 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   13299 
   13300 		/* Configure flow control advertisement for autoneg */
   13301 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   13302 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   13303 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   13304 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   13305 	} else
   13306 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   13307 
   13308 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   13309 
   13310 	return 0;
   13311 }
   13312 
   13313 static void
   13314 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   13315 {
   13316 	struct wm_softc *sc = ifp->if_softc;
   13317 	struct mii_data *mii = &sc->sc_mii;
   13318 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13319 	uint32_t pcs_adv, pcs_lpab, reg;
   13320 
   13321 	ifmr->ifm_status = IFM_AVALID;
   13322 	ifmr->ifm_active = IFM_ETHER;
   13323 
   13324 	/* Check PCS */
   13325 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13326 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   13327 		ifmr->ifm_active |= IFM_NONE;
   13328 		sc->sc_tbi_linkup = 0;
   13329 		goto setled;
   13330 	}
   13331 
   13332 	sc->sc_tbi_linkup = 1;
   13333 	ifmr->ifm_status |= IFM_ACTIVE;
   13334 	if (sc->sc_type == WM_T_I354) {
   13335 		uint32_t status;
   13336 
   13337 		status = CSR_READ(sc, WMREG_STATUS);
   13338 		if (((status & STATUS_2P5_SKU) != 0)
   13339 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   13340 			ifmr->ifm_active |= IFM_2500_KX;
   13341 		} else
   13342 			ifmr->ifm_active |= IFM_1000_KX;
   13343 	} else {
   13344 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   13345 		case PCS_LSTS_SPEED_10:
   13346 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   13347 			break;
   13348 		case PCS_LSTS_SPEED_100:
   13349 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   13350 			break;
   13351 		case PCS_LSTS_SPEED_1000:
   13352 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13353 			break;
   13354 		default:
   13355 			device_printf(sc->sc_dev, "Unknown speed\n");
   13356 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   13357 			break;
   13358 		}
   13359 	}
   13360 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   13361 	if ((reg & PCS_LSTS_FDX) != 0)
   13362 		ifmr->ifm_active |= IFM_FDX;
   13363 	else
   13364 		ifmr->ifm_active |= IFM_HDX;
   13365 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   13366 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   13367 		/* Check flow */
   13368 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13369 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   13370 			DPRINTF(sc, WM_DEBUG_LINK,
   13371 			    ("XXX LINKOK but not ACOMP\n"));
   13372 			goto setled;
   13373 		}
   13374 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   13375 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   13376 		DPRINTF(sc, WM_DEBUG_LINK,
   13377 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
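          		/*
          		 * Resolve pause per IEEE 802.3 Annex 28B: symmetric
          		 * pause if both ends advertise SYM; otherwise TX- or
          		 * RX-only pause when exactly one end pairs SYM with
          		 * ASYM.
          		 */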
   13378 		if ((pcs_adv & TXCW_SYM_PAUSE)
   13379 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   13380 			mii->mii_media_active |= IFM_FLOW
   13381 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   13382 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   13383 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13384 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   13385 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13386 			mii->mii_media_active |= IFM_FLOW
   13387 			    | IFM_ETH_TXPAUSE;
   13388 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   13389 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   13390 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   13391 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   13392 			mii->mii_media_active |= IFM_FLOW
   13393 			    | IFM_ETH_RXPAUSE;
   13394 		}
   13395 	}
   13396 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   13397 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   13398 setled:
   13399 	wm_tbi_serdes_set_linkled(sc);
   13400 }
   13401 
   13402 /*
   13403  * wm_serdes_tick:
   13404  *
   13405  *	Check the link on serdes devices.
   13406  */
   13407 static void
   13408 wm_serdes_tick(struct wm_softc *sc)
   13409 {
   13410 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   13411 	struct mii_data *mii = &sc->sc_mii;
   13412 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   13413 	uint32_t reg;
   13414 
   13415 	KASSERT(WM_CORE_LOCKED(sc));
   13416 
   13417 	mii->mii_media_status = IFM_AVALID;
   13418 	mii->mii_media_active = IFM_ETHER;
   13419 
   13420 	/* Check PCS */
   13421 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13422 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13423 		mii->mii_media_status |= IFM_ACTIVE;
   13424 		sc->sc_tbi_linkup = 1;
   13425 		sc->sc_tbi_serdes_ticks = 0;
   13426 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13427 		if ((reg & PCS_LSTS_FDX) != 0)
   13428 			mii->mii_media_active |= IFM_FDX;
   13429 		else
   13430 			mii->mii_media_active |= IFM_HDX;
   13431 	} else {
   13432 		mii->mii_media_status |= IFM_NONE;
   13433 		sc->sc_tbi_linkup = 0;
   13434 		/* If the timer expired, retry autonegotiation */
   13435 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13436 		    && (++sc->sc_tbi_serdes_ticks
   13437 			>= sc->sc_tbi_serdes_anegticks)) {
   13438 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13439 				device_xname(sc->sc_dev), __func__));
   13440 			sc->sc_tbi_serdes_ticks = 0;
   13441 			/* XXX */
   13442 			wm_serdes_mediachange(ifp);
   13443 		}
   13444 	}
   13445 
   13446 	wm_tbi_serdes_set_linkled(sc);
   13447 }
   13448 
   13449 /* SFP related */
   13450 
   13451 static int
   13452 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13453 {
   13454 	uint32_t i2ccmd;
   13455 	int i;
   13456 
   13457 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13458 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13459 
   13460 	/* Poll the ready bit */
   13461 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13462 		delay(50);
   13463 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13464 		if (i2ccmd & I2CCMD_READY)
   13465 			break;
   13466 	}
   13467 	if ((i2ccmd & I2CCMD_READY) == 0)
   13468 		return -1;
   13469 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13470 		return -1;
   13471 
   13472 	*data = i2ccmd & 0x00ff;
   13473 
   13474 	return 0;
   13475 }
   13476 
   13477 static uint32_t
   13478 wm_sfp_get_media_type(struct wm_softc *sc)
   13479 {
   13480 	uint32_t ctrl_ext;
   13481 	uint8_t val = 0;
   13482 	int timeout = 3;
   13483 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13484 	int rv = -1;
   13485 
   13486 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13487 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13488 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13489 	CSR_WRITE_FLUSH(sc);
   13490 
   13491 	/* Read SFP module data */
   13492 	while (timeout) {
   13493 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13494 		if (rv == 0)
   13495 			break;
   13496 		delay(100*1000); /* XXX too big */
   13497 		timeout--;
   13498 	}
   13499 	if (rv != 0)
   13500 		goto out;
   13501 
   13502 	switch (val) {
   13503 	case SFF_SFP_ID_SFF:
   13504 		aprint_normal_dev(sc->sc_dev,
   13505 		    "Module/Connector soldered to board\n");
   13506 		break;
   13507 	case SFF_SFP_ID_SFP:
   13508 		sc->sc_flags |= WM_F_SFP;
   13509 		break;
   13510 	case SFF_SFP_ID_UNKNOWN:
   13511 		goto out;
   13512 	default:
   13513 		break;
   13514 	}
   13515 
   13516 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13517 	if (rv != 0)
   13518 		goto out;
   13519 
   13520 	sc->sc_sfptype = val;
   13521 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13522 		mediatype = WM_MEDIATYPE_SERDES;
   13523 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13524 		sc->sc_flags |= WM_F_SGMII;
   13525 		mediatype = WM_MEDIATYPE_COPPER;
   13526 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13527 		sc->sc_flags |= WM_F_SGMII;
   13528 		mediatype = WM_MEDIATYPE_SERDES;
   13529 	} else {
   13530 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13531 		    __func__, sc->sc_sfptype);
   13532 		sc->sc_sfptype = 0; /* XXX unknown */
   13533 	}
   13534 
   13535 out:
   13536 	/* Restore I2C interface setting */
   13537 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13538 
   13539 	return mediatype;
   13540 }
   13541 
   13542 /*
   13543  * NVM related.
   13544  * Microwire, SPI (w/wo EERD) and Flash.
   13545  */
   13546 
   13547 /* Both spi and uwire */
   13548 
   13549 /*
   13550  * wm_eeprom_sendbits:
   13551  *
   13552  *	Send a series of bits to the EEPROM.
   13553  */
   13554 static void
   13555 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13556 {
   13557 	uint32_t reg;
   13558 	int x;
   13559 
   13560 	reg = CSR_READ(sc, WMREG_EECD);
   13561 
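          	/* Clock each bit out MSB first: set DI, pulse SK high, then low. */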
   13562 	for (x = nbits; x > 0; x--) {
   13563 		if (bits & (1U << (x - 1)))
   13564 			reg |= EECD_DI;
   13565 		else
   13566 			reg &= ~EECD_DI;
   13567 		CSR_WRITE(sc, WMREG_EECD, reg);
   13568 		CSR_WRITE_FLUSH(sc);
   13569 		delay(2);
   13570 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13571 		CSR_WRITE_FLUSH(sc);
   13572 		delay(2);
   13573 		CSR_WRITE(sc, WMREG_EECD, reg);
   13574 		CSR_WRITE_FLUSH(sc);
   13575 		delay(2);
   13576 	}
   13577 }
   13578 
   13579 /*
   13580  * wm_eeprom_recvbits:
   13581  *
   13582  *	Receive a series of bits from the EEPROM.
   13583  */
   13584 static void
   13585 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13586 {
   13587 	uint32_t reg, val;
   13588 	int x;
   13589 
   13590 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13591 
   13592 	val = 0;
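          	/* Clock each bit in MSB first: raise SK, sample DO, drop SK. */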
   13593 	for (x = nbits; x > 0; x--) {
   13594 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13595 		CSR_WRITE_FLUSH(sc);
   13596 		delay(2);
   13597 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13598 			val |= (1U << (x - 1));
   13599 		CSR_WRITE(sc, WMREG_EECD, reg);
   13600 		CSR_WRITE_FLUSH(sc);
   13601 		delay(2);
   13602 	}
   13603 	*valp = val;
   13604 }
   13605 
   13606 /* Microwire */
   13607 
   13608 /*
   13609  * wm_nvm_read_uwire:
   13610  *
    13611  *	Read word(s) from the EEPROM using the MicroWire protocol.
   13612  */
   13613 static int
   13614 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13615 {
   13616 	uint32_t reg, val;
   13617 	int i, rv;
   13618 
   13619 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13620 		device_xname(sc->sc_dev), __func__));
   13621 
   13622 	rv = sc->nvm.acquire(sc);
   13623 	if (rv != 0)
   13624 		return rv;
   13625 
   13626 	for (i = 0; i < wordcnt; i++) {
   13627 		/* Clear SK and DI. */
   13628 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13629 		CSR_WRITE(sc, WMREG_EECD, reg);
   13630 
   13631 		/*
   13632 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13633 		 * and Xen.
   13634 		 *
   13635 		 * We use this workaround only for 82540 because qemu's
   13636 		 * e1000 act as 82540.
   13637 		 */
   13638 		if (sc->sc_type == WM_T_82540) {
   13639 			reg |= EECD_SK;
   13640 			CSR_WRITE(sc, WMREG_EECD, reg);
   13641 			reg &= ~EECD_SK;
   13642 			CSR_WRITE(sc, WMREG_EECD, reg);
   13643 			CSR_WRITE_FLUSH(sc);
   13644 			delay(2);
   13645 		}
   13646 		/* XXX: end of workaround */
   13647 
   13648 		/* Set CHIP SELECT. */
   13649 		reg |= EECD_CS;
   13650 		CSR_WRITE(sc, WMREG_EECD, reg);
   13651 		CSR_WRITE_FLUSH(sc);
   13652 		delay(2);
   13653 
   13654 		/* Shift in the READ command. */
   13655 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13656 
   13657 		/* Shift in address. */
   13658 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13659 
   13660 		/* Shift out the data. */
   13661 		wm_eeprom_recvbits(sc, &val, 16);
   13662 		data[i] = val & 0xffff;
   13663 
   13664 		/* Clear CHIP SELECT. */
   13665 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13666 		CSR_WRITE(sc, WMREG_EECD, reg);
   13667 		CSR_WRITE_FLUSH(sc);
   13668 		delay(2);
   13669 	}
   13670 
   13671 	sc->nvm.release(sc);
   13672 	return 0;
   13673 }
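
          /*
           * Each MicroWire transaction above is: raise CS, shift out the
           * 3-bit READ opcode and sc_nvm_addrbits address bits, shift in 16
           * data bits, then drop CS.  For example (sketch), three words
           * starting at word 0 would be read with
           * wm_nvm_read_uwire(sc, 0, 3, buf).
           */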
   13674 
   13675 /* SPI */
   13676 
   13677 /*
   13678  * Set SPI and FLASH related information from the EECD register.
   13679  * For 82541 and 82547, the word size is taken from EEPROM.
   13680  */
   13681 static int
   13682 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13683 {
   13684 	int size;
   13685 	uint32_t reg;
   13686 	uint16_t data;
   13687 
   13688 	reg = CSR_READ(sc, WMREG_EECD);
   13689 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13690 
   13691 	/* Read the size of NVM from EECD by default */
   13692 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13693 	switch (sc->sc_type) {
   13694 	case WM_T_82541:
   13695 	case WM_T_82541_2:
   13696 	case WM_T_82547:
   13697 	case WM_T_82547_2:
   13698 		/* Set dummy value to access EEPROM */
   13699 		sc->sc_nvm_wordsize = 64;
   13700 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13701 			aprint_error_dev(sc->sc_dev,
   13702 			    "%s: failed to read EEPROM size\n", __func__);
   13703 		}
   13704 		reg = data;
   13705 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13706 		if (size == 0)
   13707 			size = 6; /* 64 word size */
   13708 		else
   13709 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13710 		break;
   13711 	case WM_T_80003:
   13712 	case WM_T_82571:
   13713 	case WM_T_82572:
   13714 	case WM_T_82573: /* SPI case */
   13715 	case WM_T_82574: /* SPI case */
   13716 	case WM_T_82583: /* SPI case */
   13717 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13718 		if (size > 14)
   13719 			size = 14;
   13720 		break;
   13721 	case WM_T_82575:
   13722 	case WM_T_82576:
   13723 	case WM_T_82580:
   13724 	case WM_T_I350:
   13725 	case WM_T_I354:
   13726 	case WM_T_I210:
   13727 	case WM_T_I211:
   13728 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13729 		if (size > 15)
   13730 			size = 15;
   13731 		break;
   13732 	default:
   13733 		aprint_error_dev(sc->sc_dev,
    13734 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
    13735 		return -1;
   13737 	}
   13738 
   13739 	sc->sc_nvm_wordsize = 1 << size;
   13740 
   13741 	return 0;
   13742 }
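
          /*
           * For example, a raw size field of 2 on an 82571 gives
           * sc_nvm_wordsize = 1 << (2 + NVM_WORD_SIZE_BASE_SHIFT), and the
           * 82541/82547 special case of 0 decodes to the minimum 64-word
           * (1 << 6) part.
           */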
   13743 
   13744 /*
   13745  * wm_nvm_ready_spi:
   13746  *
   13747  *	Wait for a SPI EEPROM to be ready for commands.
   13748  */
   13749 static int
   13750 wm_nvm_ready_spi(struct wm_softc *sc)
   13751 {
   13752 	uint32_t val;
   13753 	int usec;
   13754 
   13755 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13756 		device_xname(sc->sc_dev), __func__));
   13757 
   13758 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13759 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13760 		wm_eeprom_recvbits(sc, &val, 8);
   13761 		if ((val & SPI_SR_RDY) == 0)
   13762 			break;
   13763 	}
   13764 	if (usec >= SPI_MAX_RETRIES) {
    13765 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   13766 		return -1;
   13767 	}
   13768 	return 0;
   13769 }
   13770 
   13771 /*
   13772  * wm_nvm_read_spi:
   13773  *
    13774  *	Read word(s) from the EEPROM using the SPI protocol.
   13775  */
   13776 static int
   13777 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13778 {
   13779 	uint32_t reg, val;
   13780 	int i;
   13781 	uint8_t opc;
   13782 	int rv;
   13783 
   13784 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13785 		device_xname(sc->sc_dev), __func__));
   13786 
   13787 	rv = sc->nvm.acquire(sc);
   13788 	if (rv != 0)
   13789 		return rv;
   13790 
   13791 	/* Clear SK and CS. */
   13792 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13793 	CSR_WRITE(sc, WMREG_EECD, reg);
   13794 	CSR_WRITE_FLUSH(sc);
   13795 	delay(2);
   13796 
   13797 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13798 		goto out;
   13799 
   13800 	/* Toggle CS to flush commands. */
   13801 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13802 	CSR_WRITE_FLUSH(sc);
   13803 	delay(2);
   13804 	CSR_WRITE(sc, WMREG_EECD, reg);
   13805 	CSR_WRITE_FLUSH(sc);
   13806 	delay(2);
   13807 
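          	/*
          	 * SPI EEPROMs are byte addressed (hence word << 1 below);
          	 * parts with 8 address bits encode the ninth address bit in
          	 * the opcode (SPI_OPC_A8) for words at 128 and above.
          	 */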
   13808 	opc = SPI_OPC_READ;
   13809 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13810 		opc |= SPI_OPC_A8;
   13811 
   13812 	wm_eeprom_sendbits(sc, opc, 8);
   13813 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13814 
   13815 	for (i = 0; i < wordcnt; i++) {
   13816 		wm_eeprom_recvbits(sc, &val, 16);
   13817 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13818 	}
   13819 
   13820 	/* Raise CS and clear SK. */
   13821 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13822 	CSR_WRITE(sc, WMREG_EECD, reg);
   13823 	CSR_WRITE_FLUSH(sc);
   13824 	delay(2);
   13825 
   13826 out:
   13827 	sc->nvm.release(sc);
   13828 	return rv;
   13829 }
   13830 
   13831 /* Using with EERD */
   13832 
   13833 static int
   13834 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13835 {
   13836 	uint32_t attempts = 100000;
   13837 	uint32_t i, reg = 0;
   13838 	int32_t done = -1;
   13839 
   13840 	for (i = 0; i < attempts; i++) {
   13841 		reg = CSR_READ(sc, rw);
   13842 
   13843 		if (reg & EERD_DONE) {
   13844 			done = 0;
   13845 			break;
   13846 		}
   13847 		delay(5);
   13848 	}
   13849 
   13850 	return done;
   13851 }
   13852 
   13853 static int
   13854 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13855 {
   13856 	int i, eerd = 0;
   13857 	int rv;
   13858 
   13859 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13860 		device_xname(sc->sc_dev), __func__));
   13861 
   13862 	rv = sc->nvm.acquire(sc);
   13863 	if (rv != 0)
   13864 		return rv;
   13865 
   13866 	for (i = 0; i < wordcnt; i++) {
   13867 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13868 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13869 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13870 		if (rv != 0) {
   13871 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    13872 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13873 			break;
   13874 		}
   13875 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13876 	}
   13877 
   13878 	sc->nvm.release(sc);
   13879 	return rv;
   13880 }
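
          /*
           * Example (sketch): on devices that use EERD, a single-word read
           * such as wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &word) lands here
           * via sc->nvm.read; each word costs one EERD_START write plus a
           * polling loop on EERD_DONE.
           */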
   13881 
   13882 /* Flash */
   13883 
   13884 static int
   13885 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13886 {
   13887 	uint32_t eecd;
   13888 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13889 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13890 	uint32_t nvm_dword = 0;
   13891 	uint8_t sig_byte = 0;
   13892 	int rv;
   13893 
   13894 	switch (sc->sc_type) {
   13895 	case WM_T_PCH_SPT:
   13896 	case WM_T_PCH_CNP:
   13897 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13898 		act_offset = ICH_NVM_SIG_WORD * 2;
   13899 
   13900 		/* Set bank to 0 in case flash read fails. */
   13901 		*bank = 0;
   13902 
   13903 		/* Check bank 0 */
   13904 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13905 		if (rv != 0)
   13906 			return rv;
   13907 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13908 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13909 			*bank = 0;
   13910 			return 0;
   13911 		}
   13912 
   13913 		/* Check bank 1 */
    13914 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    13915 		    &nvm_dword);
    13916 		if (rv != 0)
    13917 			return rv;
   13916 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13917 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13918 			*bank = 1;
   13919 			return 0;
   13920 		}
   13921 		aprint_error_dev(sc->sc_dev,
   13922 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13923 		return -1;
   13924 	case WM_T_ICH8:
   13925 	case WM_T_ICH9:
   13926 		eecd = CSR_READ(sc, WMREG_EECD);
   13927 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13928 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13929 			return 0;
   13930 		}
   13931 		/* FALLTHROUGH */
   13932 	default:
   13933 		/* Default to 0 */
   13934 		*bank = 0;
   13935 
   13936 		/* Check bank 0 */
   13937 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13938 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13939 			*bank = 0;
   13940 			return 0;
   13941 		}
   13942 
   13943 		/* Check bank 1 */
   13944 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13945 		    &sig_byte);
   13946 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13947 			*bank = 1;
   13948 			return 0;
   13949 		}
   13950 	}
   13951 
   13952 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13953 		device_xname(sc->sc_dev)));
   13954 	return -1;
   13955 }
   13956 
   13957 /******************************************************************************
   13958  * This function does initial flash setup so that a new read/write/erase cycle
   13959  * can be started.
   13960  *
   13961  * sc - The pointer to the hw structure
   13962  ****************************************************************************/
   13963 static int32_t
   13964 wm_ich8_cycle_init(struct wm_softc *sc)
   13965 {
   13966 	uint16_t hsfsts;
   13967 	int32_t error = 1;
   13968 	int32_t i     = 0;
   13969 
   13970 	if (sc->sc_type >= WM_T_PCH_SPT)
   13971 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13972 	else
   13973 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13974 
    13975 	/* Maybe check the Flash Descriptor Valid bit in Hw status */
   13976 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13977 		return error;
   13978 
    13979 	/* Clear FCERR and DAEL in Hw status by writing 1s */
   13981 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13982 
   13983 	if (sc->sc_type >= WM_T_PCH_SPT)
   13984 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13985 	else
   13986 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13987 
    13988 	/*
    13989 	 * Either we should have a hardware SPI cycle-in-progress bit to
    13990 	 * check against in order to start a new cycle, or the FDONE bit
    13991 	 * should be changed in the hardware so that it is 1 after hardware
    13992 	 * reset, which can then be used to tell whether a cycle is in
    13993 	 * progress or has been completed.  We should also have some
    13994 	 * software semaphore mechanism to guard FDONE or the
    13995 	 * cycle-in-progress bit so that access by two threads is
    13996 	 * serialized, or some other way to keep two threads from starting
    13997 	 * a cycle at the same time.
    13998 	 */
   13998 
   13999 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14000 		/*
   14001 		 * There is no cycle running at present, so we can start a
   14002 		 * cycle
   14003 		 */
   14004 
   14005 		/* Begin by setting Flash Cycle Done. */
   14006 		hsfsts |= HSFSTS_DONE;
   14007 		if (sc->sc_type >= WM_T_PCH_SPT)
   14008 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14009 			    hsfsts & 0xffffUL);
   14010 		else
   14011 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   14012 		error = 0;
   14013 	} else {
   14014 		/*
   14015 		 * Otherwise poll for sometime so the current cycle has a
   14016 		 * chance to end before giving up.
   14017 		 */
   14018 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   14019 			if (sc->sc_type >= WM_T_PCH_SPT)
   14020 				hsfsts = ICH8_FLASH_READ32(sc,
   14021 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14022 			else
   14023 				hsfsts = ICH8_FLASH_READ16(sc,
   14024 				    ICH_FLASH_HSFSTS);
   14025 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   14026 				error = 0;
   14027 				break;
   14028 			}
   14029 			delay(1);
   14030 		}
   14031 		if (error == 0) {
   14032 			/*
    14033 			 * The previous cycle completed before we gave up
    14034 			 * waiting; now set the Flash Cycle Done bit.
   14035 			 */
   14036 			hsfsts |= HSFSTS_DONE;
   14037 			if (sc->sc_type >= WM_T_PCH_SPT)
   14038 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14039 				    hsfsts & 0xffffUL);
   14040 			else
   14041 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   14042 				    hsfsts);
   14043 		}
   14044 	}
   14045 	return error;
   14046 }
   14047 
   14048 /******************************************************************************
   14049  * This function starts a flash cycle and waits for its completion
   14050  *
   14051  * sc - The pointer to the hw structure
   14052  ****************************************************************************/
   14053 static int32_t
   14054 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   14055 {
   14056 	uint16_t hsflctl;
   14057 	uint16_t hsfsts;
   14058 	int32_t error = 1;
   14059 	uint32_t i = 0;
   14060 
   14061 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   14062 	if (sc->sc_type >= WM_T_PCH_SPT)
   14063 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   14064 	else
   14065 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   14066 	hsflctl |= HSFCTL_GO;
   14067 	if (sc->sc_type >= WM_T_PCH_SPT)
   14068 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14069 		    (uint32_t)hsflctl << 16);
   14070 	else
   14071 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14072 
   14073 	/* Wait till FDONE bit is set to 1 */
   14074 	do {
   14075 		if (sc->sc_type >= WM_T_PCH_SPT)
   14076 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14077 			    & 0xffffUL;
   14078 		else
   14079 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   14080 		if (hsfsts & HSFSTS_DONE)
   14081 			break;
   14082 		delay(1);
   14083 		i++;
   14084 	} while (i < timeout);
    14085 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   14086 		error = 0;
   14087 
   14088 	return error;
   14089 }
   14090 
   14091 /******************************************************************************
   14092  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   14093  *
   14094  * sc - The pointer to the hw structure
   14095  * index - The index of the byte or word to read.
   14096  * size - Size of data to read, 1=byte 2=word, 4=dword
   14097  * data - Pointer to the word to store the value read.
   14098  *****************************************************************************/
   14099 static int32_t
   14100 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   14101     uint32_t size, uint32_t *data)
   14102 {
   14103 	uint16_t hsfsts;
   14104 	uint16_t hsflctl;
   14105 	uint32_t flash_linear_address;
   14106 	uint32_t flash_data = 0;
   14107 	int32_t error = 1;
   14108 	int32_t count = 0;
   14109 
    14110 	if (size < 1 || size > 4 || data == NULL ||
   14111 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   14112 		return error;
   14113 
   14114 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   14115 	    sc->sc_ich8_flash_base;
   14116 
   14117 	do {
   14118 		delay(1);
   14119 		/* Steps */
   14120 		error = wm_ich8_cycle_init(sc);
   14121 		if (error)
   14122 			break;
   14123 
   14124 		if (sc->sc_type >= WM_T_PCH_SPT)
   14125 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   14126 			    >> 16;
   14127 		else
   14128 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    14129 		/* The byte count field holds size - 1 (1, 2, or 4 bytes). */
   14130 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   14131 		    & HSFCTL_BCOUNT_MASK;
   14132 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   14133 		if (sc->sc_type >= WM_T_PCH_SPT) {
   14134 			/*
    14135 			 * In SPT, this register is in LAN memory space,
    14136 			 * not flash.  Therefore, only 32-bit access is
    14137 			 * supported.
   14137 			 */
   14138 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   14139 			    (uint32_t)hsflctl << 16);
   14140 		} else
   14141 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   14142 
   14143 		/*
   14144 		 * Write the last 24 bits of index into Flash Linear address
   14145 		 * field in Flash Address
   14146 		 */
    14147 		/* TODO: maybe check the index against the size of the flash */
   14148 
   14149 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   14150 
   14151 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   14152 
   14153 		/*
    14154 		 * If FCERR is set, clear it and retry the whole
    14155 		 * sequence a few more times; otherwise read the result
    14156 		 * from Flash Data0, least significant byte first.
   14158 		 */
   14159 		if (error == 0) {
   14160 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   14161 			if (size == 1)
   14162 				*data = (uint8_t)(flash_data & 0x000000FF);
   14163 			else if (size == 2)
   14164 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   14165 			else if (size == 4)
   14166 				*data = (uint32_t)flash_data;
   14167 			break;
   14168 		} else {
   14169 			/*
   14170 			 * If we've gotten here, then things are probably
   14171 			 * completely hosed, but if the error condition is
   14172 			 * detected, it won't hurt to give it another try...
   14173 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   14174 			 */
   14175 			if (sc->sc_type >= WM_T_PCH_SPT)
   14176 				hsfsts = ICH8_FLASH_READ32(sc,
   14177 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   14178 			else
   14179 				hsfsts = ICH8_FLASH_READ16(sc,
   14180 				    ICH_FLASH_HSFSTS);
   14181 
   14182 			if (hsfsts & HSFSTS_ERR) {
   14183 				/* Repeat for some time before giving up. */
   14184 				continue;
   14185 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   14186 				break;
   14187 		}
   14188 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   14189 
   14190 	return error;
   14191 }
   14192 
   14193 /******************************************************************************
   14194  * Reads a single byte from the NVM using the ICH8 flash access registers.
   14195  *
   14196  * sc - pointer to wm_hw structure
   14197  * index - The index of the byte to read.
   14198  * data - Pointer to a byte to store the value read.
   14199  *****************************************************************************/
   14200 static int32_t
   14201 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   14202 {
   14203 	int32_t status;
   14204 	uint32_t word = 0;
   14205 
   14206 	status = wm_read_ich8_data(sc, index, 1, &word);
   14207 	if (status == 0)
   14208 		*data = (uint8_t)word;
   14209 	else
   14210 		*data = 0;
   14211 
   14212 	return status;
   14213 }
   14214 
   14215 /******************************************************************************
   14216  * Reads a word from the NVM using the ICH8 flash access registers.
   14217  *
    14218  * sc - pointer to the wm_softc structure
   14219  * index - The starting byte index of the word to read.
   14220  * data - Pointer to a word to store the value read.
   14221  *****************************************************************************/
   14222 static int32_t
   14223 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   14224 {
   14225 	int32_t status;
   14226 	uint32_t word = 0;
   14227 
   14228 	status = wm_read_ich8_data(sc, index, 2, &word);
   14229 	if (status == 0)
   14230 		*data = (uint16_t)word;
   14231 	else
   14232 		*data = 0;
   14233 
   14234 	return status;
   14235 }
   14236 
   14237 /******************************************************************************
   14238  * Reads a dword from the NVM using the ICH8 flash access registers.
   14239  *
    14240  * sc - pointer to the wm_softc structure
    14241  * index - The starting byte index of the dword to read.
    14242  * data - Pointer to a dword to store the value read.
   14243  *****************************************************************************/
   14244 static int32_t
   14245 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   14246 {
   14247 	int32_t status;
   14248 
   14249 	status = wm_read_ich8_data(sc, index, 4, data);
   14250 	return status;
   14251 }
   14252 
   14253 /******************************************************************************
   14254  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   14255  * register.
   14256  *
   14257  * sc - Struct containing variables accessed by shared code
   14258  * offset - offset of word in the EEPROM to read
   14259  * data - word read from the EEPROM
   14260  * words - number of words to read
   14261  *****************************************************************************/
   14262 static int
   14263 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14264 {
   14265 	int rv;
   14266 	uint32_t flash_bank = 0;
   14267 	uint32_t act_offset = 0;
   14268 	uint32_t bank_offset = 0;
   14269 	uint16_t word = 0;
   14270 	uint16_t i = 0;
   14271 
   14272 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14273 		device_xname(sc->sc_dev), __func__));
   14274 
   14275 	rv = sc->nvm.acquire(sc);
   14276 	if (rv != 0)
   14277 		return rv;
   14278 
   14279 	/*
    14280 	 * We need to know which flash bank is valid.  If we didn't
    14281 	 * allocate eeprom_shadow_ram, we may not be managing
    14282 	 * flash_bank, so it cannot be trusted and needs to be
    14283 	 * re-detected on each read.
   14284 	 */
   14285 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14286 	if (rv) {
   14287 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14288 			device_xname(sc->sc_dev)));
   14289 		flash_bank = 0;
   14290 	}
   14291 
   14292 	/*
    14293 	 * Adjust the offset if we're on bank 1, accounting for the
    14294 	 * 16-bit word size.
   14295 	 */
   14296 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14297 
   14298 	for (i = 0; i < words; i++) {
   14299 		/* The NVM part needs a byte offset, hence * 2 */
   14300 		act_offset = bank_offset + ((offset + i) * 2);
   14301 		rv = wm_read_ich8_word(sc, act_offset, &word);
   14302 		if (rv) {
   14303 			aprint_error_dev(sc->sc_dev,
   14304 			    "%s: failed to read NVM\n", __func__);
   14305 			break;
   14306 		}
   14307 		data[i] = word;
   14308 	}
   14309 
   14310 	sc->nvm.release(sc);
   14311 	return rv;
   14312 }
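
/*
 * Illustrative sketch, not part of the driver: how wm_nvm_read_ich8()
 * above turns a 16-bit word offset into a byte offset within the active
 * flash bank.  "bank_size_words" stands in for
 * sc->sc_ich8_flash_bank_size.
 */
static inline uint32_t
wm_ich8_word_to_byte_offset(uint32_t flash_bank, uint32_t bank_size_words,
    uint32_t word_offset)
{

	/* Each bank holds bank_size_words 16-bit words, i.e. 2 bytes each. */
	return (flash_bank * bank_size_words * 2) + (word_offset * 2);
}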
   14313 
   14314 /******************************************************************************
   14315  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   14316  * register.
   14317  *
   14318  * sc - Struct containing variables accessed by shared code
   14319  * offset - offset of word in the EEPROM to read
   14320  * data - word read from the EEPROM
   14321  * words - number of words to read
   14322  *****************************************************************************/
   14323 static int
   14324 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14325 {
   14326 	int	 rv;
   14327 	uint32_t flash_bank = 0;
   14328 	uint32_t act_offset = 0;
   14329 	uint32_t bank_offset = 0;
   14330 	uint32_t dword = 0;
   14331 	uint16_t i = 0;
   14332 
   14333 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14334 		device_xname(sc->sc_dev), __func__));
   14335 
   14336 	rv = sc->nvm.acquire(sc);
   14337 	if (rv != 0)
   14338 		return rv;
   14339 
   14340 	/*
    14341 	 * We need to know which flash bank is valid.  If we didn't
    14342 	 * allocate eeprom_shadow_ram, we may not be managing
    14343 	 * flash_bank, so it cannot be trusted and needs to be
    14344 	 * re-detected on each read.
   14345 	 */
   14346 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   14347 	if (rv) {
   14348 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   14349 			device_xname(sc->sc_dev)));
   14350 		flash_bank = 0;
   14351 	}
   14352 
   14353 	/*
    14354 	 * Adjust the offset if we're on bank 1, accounting for the
    14355 	 * 16-bit word size.
   14356 	 */
   14357 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   14358 
   14359 	for (i = 0; i < words; i++) {
   14360 		/* The NVM part needs a byte offset, hence * 2 */
   14361 		act_offset = bank_offset + ((offset + i) * 2);
   14362 		/* but we must read dword aligned, so mask ... */
   14363 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   14364 		if (rv) {
   14365 			aprint_error_dev(sc->sc_dev,
   14366 			    "%s: failed to read NVM\n", __func__);
   14367 			break;
   14368 		}
   14369 		/* ... and pick out low or high word */
   14370 		if ((act_offset & 0x2) == 0)
   14371 			data[i] = (uint16_t)(dword & 0xFFFF);
   14372 		else
   14373 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   14374 	}
   14375 
   14376 	sc->nvm.release(sc);
   14377 	return rv;
   14378 }
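
/*
 * Illustrative sketch, not part of the driver: how wm_nvm_read_spt()
 * above picks a 16-bit word out of its dword-aligned read.  Bit 1 of the
 * byte offset selects the low or high half of the little-endian dword.
 */
static inline uint16_t
wm_spt_pick_word(uint32_t dword, uint32_t act_offset)
{

	if ((act_offset & 0x2) == 0)
		return (uint16_t)(dword & 0xFFFF);
	return (uint16_t)((dword >> 16) & 0xFFFF);
}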
   14379 
   14380 /* iNVM */
   14381 
   14382 static int
   14383 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   14384 {
    14385 	int32_t	 rv = -1;	/* Assume "not found" until located. */
   14386 	uint32_t invm_dword;
   14387 	uint16_t i;
   14388 	uint8_t record_type, word_address;
   14389 
   14390 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14391 		device_xname(sc->sc_dev), __func__));
   14392 
   14393 	for (i = 0; i < INVM_SIZE; i++) {
   14394 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   14395 		/* Get record type */
   14396 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   14397 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   14398 			break;
   14399 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   14400 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   14401 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   14402 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   14403 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   14404 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   14405 			if (word_address == address) {
   14406 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   14407 				rv = 0;
   14408 				break;
   14409 			}
   14410 		}
   14411 	}
   14412 
   14413 	return rv;
   14414 }
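
/*
 * Illustrative sketch, not part of the driver: the read-with-default
 * pattern that wm_nvm_read_invm() below applies for words that may be
 * absent from a device's iNVM image.
 */
static int
wm_invm_read_with_default(struct wm_softc *sc, uint16_t addr, uint16_t def,
    uint16_t *data)
{

	if (wm_nvm_read_word_invm(sc, addr, data) != 0)
		*data = def;	/* Fall back to the documented default. */
	return 0;
}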
   14415 
   14416 static int
   14417 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   14418 {
   14419 	int i, rv;
   14420 
   14421 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14422 		device_xname(sc->sc_dev), __func__));
   14423 
   14424 	rv = sc->nvm.acquire(sc);
   14425 	if (rv != 0)
   14426 		return rv;
   14427 
   14428 	for (i = 0; i < words; i++) {
   14429 		switch (offset + i) {
   14430 		case NVM_OFF_MACADDR:
   14431 		case NVM_OFF_MACADDR1:
   14432 		case NVM_OFF_MACADDR2:
   14433 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14434 			if (rv != 0) {
   14435 				data[i] = 0xffff;
   14436 				rv = -1;
   14437 			}
   14438 			break;
   14439 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14440 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14441 			if (rv != 0) {
   14442 				*data = INVM_DEFAULT_AL;
   14443 				rv = 0;
   14444 			}
   14445 			break;
   14446 		case NVM_OFF_CFG2:
   14447 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14448 			if (rv != 0) {
   14449 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14450 				rv = 0;
   14451 			}
   14452 			break;
   14453 		case NVM_OFF_CFG4:
   14454 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14455 			if (rv != 0) {
   14456 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14457 				rv = 0;
   14458 			}
   14459 			break;
   14460 		case NVM_OFF_LED_1_CFG:
   14461 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14462 			if (rv != 0) {
   14463 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14464 				rv = 0;
   14465 			}
   14466 			break;
   14467 		case NVM_OFF_LED_0_2_CFG:
   14468 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14469 			if (rv != 0) {
   14470 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14471 				rv = 0;
   14472 			}
   14473 			break;
   14474 		case NVM_OFF_ID_LED_SETTINGS:
   14475 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14476 			if (rv != 0) {
   14477 				*data = ID_LED_RESERVED_FFFF;
   14478 				rv = 0;
   14479 			}
   14480 			break;
   14481 		default:
   14482 			DPRINTF(sc, WM_DEBUG_NVM,
   14483 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14484 			*data = NVM_RESERVED_WORD;
   14485 			break;
   14486 		}
   14487 	}
   14488 
   14489 	sc->nvm.release(sc);
   14490 	return rv;
   14491 }
   14492 
   14493 /* Lock, detecting NVM type, validate checksum, version and read */
   14494 
   14495 static int
   14496 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14497 {
   14498 	uint32_t eecd = 0;
   14499 
   14500 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14501 	    || sc->sc_type == WM_T_82583) {
   14502 		eecd = CSR_READ(sc, WMREG_EECD);
   14503 
   14504 		/* Isolate bits 15 & 16 */
   14505 		eecd = ((eecd >> 15) & 0x03);
   14506 
   14507 		/* If both bits are set, device is Flash type */
   14508 		if (eecd == 0x03)
   14509 			return 0;
   14510 	}
   14511 	return 1;
   14512 }
   14513 
   14514 static int
   14515 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14516 {
   14517 	uint32_t eec;
   14518 
   14519 	eec = CSR_READ(sc, WMREG_EEC);
   14520 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14521 		return 1;
   14522 
   14523 	return 0;
   14524 }
   14525 
   14526 /*
   14527  * wm_nvm_validate_checksum
   14528  *
    14529  * The first 64 (16-bit) words must sum to NVM_CHECKSUM (0xBABA).
   14530  */
   14531 static int
   14532 wm_nvm_validate_checksum(struct wm_softc *sc)
   14533 {
   14534 	uint16_t checksum;
   14535 	uint16_t eeprom_data;
   14536 #ifdef WM_DEBUG
   14537 	uint16_t csum_wordaddr, valid_checksum;
   14538 #endif
   14539 	int i;
   14540 
   14541 	checksum = 0;
   14542 
   14543 	/* Don't check for I211 */
   14544 	if (sc->sc_type == WM_T_I211)
   14545 		return 0;
   14546 
   14547 #ifdef WM_DEBUG
   14548 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14549 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14550 		csum_wordaddr = NVM_OFF_COMPAT;
   14551 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14552 	} else {
   14553 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14554 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14555 	}
   14556 
   14557 	/* Dump EEPROM image for debug */
   14558 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14559 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14560 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14561 		/* XXX PCH_SPT? */
   14562 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14563 		if ((eeprom_data & valid_checksum) == 0)
   14564 			DPRINTF(sc, WM_DEBUG_NVM,
   14565 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   14566 				device_xname(sc->sc_dev), eeprom_data,
   14567 				    valid_checksum));
   14568 	}
   14569 
   14570 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14571 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14572 		for (i = 0; i < NVM_SIZE; i++) {
   14573 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14574 				printf("XXXX ");
   14575 			else
   14576 				printf("%04hx ", eeprom_data);
   14577 			if (i % 8 == 7)
   14578 				printf("\n");
   14579 		}
   14580 	}
   14581 
   14582 #endif /* WM_DEBUG */
   14583 
   14584 	for (i = 0; i < NVM_SIZE; i++) {
   14585 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14586 			return -1;
   14587 		checksum += eeprom_data;
   14588 	}
   14589 
   14590 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14591 #ifdef WM_DEBUG
   14592 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14593 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14594 #endif
   14595 	}
   14596 
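	/*
	 * XXX Deliberately don't fail on a mismatch: some boards
	 * reportedly ship with a bad checksum, so only warn above
	 * (assumed rationale).
	 */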
   14597 	return 0;
   14598 }
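
/*
 * Illustrative sketch, not part of the driver: how an NVM image's
 * checksum word is chosen so that wm_nvm_validate_checksum() above sums
 * to NVM_CHECKSUM (0xBABA on these parts).  The image's last word is the
 * correction term for the preceding NVM_SIZE - 1 words.
 */
static uint16_t
wm_nvm_compute_checksum_word(const uint16_t *words)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_SIZE - 1; i++)
		sum += words[i];
	return (uint16_t)(NVM_CHECKSUM - sum);
}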
   14599 
   14600 static void
   14601 wm_nvm_version_invm(struct wm_softc *sc)
   14602 {
   14603 	uint32_t dword;
   14604 
   14605 	/*
    14606 	 * Linux's code to decode the version is very strange, so we
    14607 	 * don't follow that algorithm and just use word 61 as the
    14608 	 * document describes.  Perhaps it's not perfect though...
   14609 	 *
   14610 	 * Example:
   14611 	 *
   14612 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14613 	 */
   14614 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14615 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14616 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14617 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14618 }
   14619 
   14620 static void
   14621 wm_nvm_version(struct wm_softc *sc)
   14622 {
   14623 	uint16_t major, minor, build, patch;
   14624 	uint16_t uid0, uid1;
   14625 	uint16_t nvm_data;
   14626 	uint16_t off;
   14627 	bool check_version = false;
   14628 	bool check_optionrom = false;
   14629 	bool have_build = false;
   14630 	bool have_uid = true;
   14631 
   14632 	/*
   14633 	 * Version format:
   14634 	 *
   14635 	 * XYYZ
   14636 	 * X0YZ
   14637 	 * X0YY
   14638 	 *
   14639 	 * Example:
   14640 	 *
   14641 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14642 	 *	82571	0x50a6	5.10.6?
   14643 	 *	82572	0x506a	5.6.10?
   14644 	 *	82572EI	0x5069	5.6.9?
   14645 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14646 	 *		0x2013	2.1.3?
   14647 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14648 	 * ICH8+82567	0x0040	0.4.0?
   14649 	 * ICH9+82566	0x1040	1.4.0?
   14650 	 *ICH10+82567	0x0043	0.4.3?
   14651 	 *  PCH+82577	0x00c1	0.12.1?
   14652 	 * PCH2+82579	0x00d3	0.13.3?
   14653 	 *		0x00d4	0.13.4?
   14654 	 *  LPT+I218	0x0023	0.2.3?
   14655 	 *  SPT+I219	0x0084	0.8.4?
   14656 	 *  CNP+I219	0x0054	0.5.4?
   14657 	 */
   14658 
   14659 	/*
   14660 	 * XXX
    14661 	 * QEMU's e1000e (82574L) emulation has an SPI ROM of only 64 words.
    14662 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14663 	 */
   14664 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14665 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14666 		have_uid = false;
   14667 
   14668 	switch (sc->sc_type) {
   14669 	case WM_T_82571:
   14670 	case WM_T_82572:
   14671 	case WM_T_82574:
   14672 	case WM_T_82583:
   14673 		check_version = true;
   14674 		check_optionrom = true;
   14675 		have_build = true;
   14676 		break;
   14677 	case WM_T_ICH8:
   14678 	case WM_T_ICH9:
   14679 	case WM_T_ICH10:
   14680 	case WM_T_PCH:
   14681 	case WM_T_PCH2:
   14682 	case WM_T_PCH_LPT:
   14683 	case WM_T_PCH_SPT:
   14684 	case WM_T_PCH_CNP:
   14685 		check_version = true;
   14686 		have_build = true;
   14687 		have_uid = false;
   14688 		break;
   14689 	case WM_T_82575:
   14690 	case WM_T_82576:
   14691 	case WM_T_82580:
   14692 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14693 			check_version = true;
   14694 		break;
   14695 	case WM_T_I211:
   14696 		wm_nvm_version_invm(sc);
   14697 		have_uid = false;
   14698 		goto printver;
   14699 	case WM_T_I210:
   14700 		if (!wm_nvm_flash_presence_i210(sc)) {
   14701 			wm_nvm_version_invm(sc);
   14702 			have_uid = false;
   14703 			goto printver;
   14704 		}
   14705 		/* FALLTHROUGH */
   14706 	case WM_T_I350:
   14707 	case WM_T_I354:
   14708 		check_version = true;
   14709 		check_optionrom = true;
   14710 		break;
   14711 	default:
   14712 		return;
   14713 	}
   14714 	if (check_version
   14715 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14716 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14717 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14718 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14719 			build = nvm_data & NVM_BUILD_MASK;
   14720 			have_build = true;
   14721 		} else
   14722 			minor = nvm_data & 0x00ff;
   14723 
   14724 		/* Decimal */
   14725 		minor = (minor / 16) * 10 + (minor % 16);
   14726 		sc->sc_nvm_ver_major = major;
   14727 		sc->sc_nvm_ver_minor = minor;
   14728 
   14729 printver:
   14730 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14731 		    sc->sc_nvm_ver_minor);
   14732 		if (have_build) {
   14733 			sc->sc_nvm_ver_build = build;
   14734 			aprint_verbose(".%d", build);
   14735 		}
   14736 	}
   14737 
    14738 	/* Assume the Option ROM area is above NVM_SIZE */
   14739 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14740 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14741 		/* Option ROM Version */
   14742 		if ((off != 0x0000) && (off != 0xffff)) {
   14743 			int rv;
   14744 
   14745 			off += NVM_COMBO_VER_OFF;
   14746 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14747 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14748 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14749 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14750 				/* 16bits */
   14751 				major = uid0 >> 8;
   14752 				build = (uid0 << 8) | (uid1 >> 8);
   14753 				patch = uid1 & 0x00ff;
   14754 				aprint_verbose(", option ROM Version %d.%d.%d",
   14755 				    major, build, patch);
   14756 			}
   14757 		}
   14758 	}
   14759 
   14760 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14761 		aprint_verbose(", Image Unique ID %08x",
   14762 		    ((uint32_t)uid1 << 16) | uid0);
   14763 }
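
/*
 * Illustrative sketch, not part of the driver: decoding an XYYZ-style
 * version word with the same masks wm_nvm_version() uses above, e.g.
 * 0x50a2 -> 5.10.2 (the hex-coded minor 0x0a is rendered as decimal 10).
 */
static void
wm_decode_version_word(uint16_t nvm_data, uint16_t *major, uint16_t *minor,
    uint16_t *build)
{

	*major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
	*minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
	*build = nvm_data & NVM_BUILD_MASK;
	/* Render the hex-coded minor as decimal, as the driver does. */
	*minor = (*minor / 16) * 10 + (*minor % 16);
}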
   14764 
   14765 /*
   14766  * wm_nvm_read:
   14767  *
   14768  *	Read data from the serial EEPROM.
   14769  */
   14770 static int
   14771 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14772 {
   14773 	int rv;
   14774 
   14775 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14776 		device_xname(sc->sc_dev), __func__));
   14777 
   14778 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14779 		return -1;
   14780 
   14781 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14782 
   14783 	return rv;
   14784 }
   14785 
   14786 /*
   14787  * Hardware semaphores.
    14788  * Very complex...
   14789  */
   14790 
   14791 static int
   14792 wm_get_null(struct wm_softc *sc)
   14793 {
   14794 
   14795 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14796 		device_xname(sc->sc_dev), __func__));
   14797 	return 0;
   14798 }
   14799 
   14800 static void
   14801 wm_put_null(struct wm_softc *sc)
   14802 {
   14803 
   14804 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14805 		device_xname(sc->sc_dev), __func__));
   14806 	return;
   14807 }
   14808 
   14809 static int
   14810 wm_get_eecd(struct wm_softc *sc)
   14811 {
   14812 	uint32_t reg;
   14813 	int x;
   14814 
   14815 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14816 		device_xname(sc->sc_dev), __func__));
   14817 
   14818 	reg = CSR_READ(sc, WMREG_EECD);
   14819 
   14820 	/* Request EEPROM access. */
   14821 	reg |= EECD_EE_REQ;
   14822 	CSR_WRITE(sc, WMREG_EECD, reg);
   14823 
    14824 	/* ... and wait for it to be granted. */
   14825 	for (x = 0; x < 1000; x++) {
   14826 		reg = CSR_READ(sc, WMREG_EECD);
   14827 		if (reg & EECD_EE_GNT)
   14828 			break;
   14829 		delay(5);
   14830 	}
   14831 	if ((reg & EECD_EE_GNT) == 0) {
   14832 		aprint_error_dev(sc->sc_dev,
   14833 		    "could not acquire EEPROM GNT\n");
   14834 		reg &= ~EECD_EE_REQ;
   14835 		CSR_WRITE(sc, WMREG_EECD, reg);
   14836 		return -1;
   14837 	}
   14838 
   14839 	return 0;
   14840 }
   14841 
   14842 static void
   14843 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14844 {
   14845 
   14846 	*eecd |= EECD_SK;
   14847 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14848 	CSR_WRITE_FLUSH(sc);
   14849 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14850 		delay(1);
   14851 	else
   14852 		delay(50);
   14853 }
   14854 
   14855 static void
   14856 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14857 {
   14858 
   14859 	*eecd &= ~EECD_SK;
   14860 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14861 	CSR_WRITE_FLUSH(sc);
   14862 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14863 		delay(1);
   14864 	else
   14865 		delay(50);
   14866 }
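
/*
 * Illustrative sketch, not part of the driver: a full bit-bang clock
 * pulse on EECD.SK is a raise followed by a lower, composed from the two
 * helpers above.
 */
static void
wm_nvm_eec_clock_pulse(struct wm_softc *sc, uint32_t *eecd)
{

	wm_nvm_eec_clock_raise(sc, eecd);
	wm_nvm_eec_clock_lower(sc, eecd);
}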
   14867 
   14868 static void
   14869 wm_put_eecd(struct wm_softc *sc)
   14870 {
   14871 	uint32_t reg;
   14872 
   14873 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14874 		device_xname(sc->sc_dev), __func__));
   14875 
   14876 	/* Stop nvm */
   14877 	reg = CSR_READ(sc, WMREG_EECD);
   14878 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14879 		/* Pull CS high */
   14880 		reg |= EECD_CS;
   14881 		wm_nvm_eec_clock_lower(sc, &reg);
   14882 	} else {
   14883 		/* CS on Microwire is active-high */
   14884 		reg &= ~(EECD_CS | EECD_DI);
   14885 		CSR_WRITE(sc, WMREG_EECD, reg);
   14886 		wm_nvm_eec_clock_raise(sc, &reg);
   14887 		wm_nvm_eec_clock_lower(sc, &reg);
   14888 	}
   14889 
   14890 	reg = CSR_READ(sc, WMREG_EECD);
   14891 	reg &= ~EECD_EE_REQ;
   14892 	CSR_WRITE(sc, WMREG_EECD, reg);
   14893 
   14894 	return;
   14895 }
   14896 
   14897 /*
   14898  * Get hardware semaphore.
   14899  * Same as e1000_get_hw_semaphore_generic()
   14900  */
   14901 static int
   14902 wm_get_swsm_semaphore(struct wm_softc *sc)
   14903 {
   14904 	int32_t timeout;
   14905 	uint32_t swsm;
   14906 
   14907 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14908 		device_xname(sc->sc_dev), __func__));
   14909 	KASSERT(sc->sc_nvm_wordsize > 0);
   14910 
   14911 retry:
   14912 	/* Get the SW semaphore. */
   14913 	timeout = sc->sc_nvm_wordsize + 1;
   14914 	while (timeout) {
   14915 		swsm = CSR_READ(sc, WMREG_SWSM);
   14916 
   14917 		if ((swsm & SWSM_SMBI) == 0)
   14918 			break;
   14919 
   14920 		delay(50);
   14921 		timeout--;
   14922 	}
   14923 
   14924 	if (timeout == 0) {
   14925 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14926 			/*
   14927 			 * In rare circumstances, the SW semaphore may already
   14928 			 * be held unintentionally. Clear the semaphore once
   14929 			 * before giving up.
   14930 			 */
   14931 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14932 			wm_put_swsm_semaphore(sc);
   14933 			goto retry;
   14934 		}
   14935 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
   14936 		return -1;
   14937 	}
   14938 
   14939 	/* Get the FW semaphore. */
   14940 	timeout = sc->sc_nvm_wordsize + 1;
   14941 	while (timeout) {
   14942 		swsm = CSR_READ(sc, WMREG_SWSM);
   14943 		swsm |= SWSM_SWESMBI;
   14944 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14945 		/* If we managed to set the bit we got the semaphore. */
   14946 		swsm = CSR_READ(sc, WMREG_SWSM);
   14947 		if (swsm & SWSM_SWESMBI)
   14948 			break;
   14949 
   14950 		delay(50);
   14951 		timeout--;
   14952 	}
   14953 
   14954 	if (timeout == 0) {
   14955 		aprint_error_dev(sc->sc_dev,
   14956 		    "could not acquire SWSM SWESMBI\n");
   14957 		/* Release semaphores */
   14958 		wm_put_swsm_semaphore(sc);
   14959 		return -1;
   14960 	}
   14961 	return 0;
   14962 }
   14963 
   14964 /*
   14965  * Put hardware semaphore.
   14966  * Same as e1000_put_hw_semaphore_generic()
   14967  */
   14968 static void
   14969 wm_put_swsm_semaphore(struct wm_softc *sc)
   14970 {
   14971 	uint32_t swsm;
   14972 
   14973 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14974 		device_xname(sc->sc_dev), __func__));
   14975 
   14976 	swsm = CSR_READ(sc, WMREG_SWSM);
   14977 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14978 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14979 }
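
/*
 * Illustrative sketch, not part of the driver: typical use of the
 * two-stage SWSM semaphore above -- acquire, touch the shared resource,
 * release.
 */
static int
wm_swsm_usage_example(struct wm_softc *sc)
{

	if (wm_get_swsm_semaphore(sc) != 0)
		return -1;	/* SMBI or SWESMBI timed out */
	/* ... access the NVM or other SW/FW-shared state here ... */
	wm_put_swsm_semaphore(sc);
	return 0;
}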
   14980 
   14981 /*
   14982  * Get SW/FW semaphore.
   14983  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14984  */
   14985 static int
   14986 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14987 {
   14988 	uint32_t swfw_sync;
   14989 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14990 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14991 	int timeout;
   14992 
   14993 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14994 		device_xname(sc->sc_dev), __func__));
   14995 
   14996 	if (sc->sc_type == WM_T_80003)
   14997 		timeout = 50;
   14998 	else
   14999 		timeout = 200;
   15000 
   15001 	while (timeout) {
   15002 		if (wm_get_swsm_semaphore(sc)) {
   15003 			aprint_error_dev(sc->sc_dev,
   15004 			    "%s: failed to get semaphore\n",
   15005 			    __func__);
   15006 			return -1;
   15007 		}
   15008 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15009 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   15010 			swfw_sync |= swmask;
   15011 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15012 			wm_put_swsm_semaphore(sc);
   15013 			return 0;
   15014 		}
   15015 		wm_put_swsm_semaphore(sc);
   15016 		delay(5000);
   15017 		timeout--;
   15018 	}
   15019 	device_printf(sc->sc_dev,
   15020 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   15021 	    mask, swfw_sync);
   15022 	return -1;
   15023 }
   15024 
   15025 static void
   15026 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   15027 {
   15028 	uint32_t swfw_sync;
   15029 
   15030 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15031 		device_xname(sc->sc_dev), __func__));
   15032 
   15033 	while (wm_get_swsm_semaphore(sc) != 0)
   15034 		continue;
   15035 
   15036 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   15037 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   15038 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   15039 
   15040 	wm_put_swsm_semaphore(sc);
   15041 }
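
/*
 * Illustrative sketch, not part of the driver: the SW_FW_SYNC bit layout
 * used above.  Each resource has a software-owned bit at
 * mask << SWFW_SOFT_SHIFT and a firmware-owned bit at
 * mask << SWFW_FIRM_SHIFT; the resource is free only when both are clear.
 */
static bool
wm_swfw_is_free(uint32_t swfw_sync, uint16_t mask)
{

	return (swfw_sync & (((uint32_t)mask << SWFW_SOFT_SHIFT) |
	    ((uint32_t)mask << SWFW_FIRM_SHIFT))) == 0;
}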
   15042 
   15043 static int
   15044 wm_get_nvm_80003(struct wm_softc *sc)
   15045 {
   15046 	int rv;
   15047 
   15048 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   15049 		device_xname(sc->sc_dev), __func__));
   15050 
   15051 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   15052 		aprint_error_dev(sc->sc_dev,
   15053 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   15054 		return rv;
   15055 	}
   15056 
   15057 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15058 	    && (rv = wm_get_eecd(sc)) != 0) {
   15059 		aprint_error_dev(sc->sc_dev,
   15060 		    "%s: failed to get semaphore(EECD)\n", __func__);
   15061 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15062 		return rv;
   15063 	}
   15064 
   15065 	return 0;
   15066 }
   15067 
   15068 static void
   15069 wm_put_nvm_80003(struct wm_softc *sc)
   15070 {
   15071 
   15072 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15073 		device_xname(sc->sc_dev), __func__));
   15074 
   15075 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15076 		wm_put_eecd(sc);
   15077 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   15078 }
   15079 
   15080 static int
   15081 wm_get_nvm_82571(struct wm_softc *sc)
   15082 {
   15083 	int rv;
   15084 
   15085 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15086 		device_xname(sc->sc_dev), __func__));
   15087 
   15088 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   15089 		return rv;
   15090 
   15091 	switch (sc->sc_type) {
   15092 	case WM_T_82573:
   15093 		break;
   15094 	default:
   15095 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15096 			rv = wm_get_eecd(sc);
   15097 		break;
   15098 	}
   15099 
   15100 	if (rv != 0) {
   15101 		aprint_error_dev(sc->sc_dev,
   15102 		    "%s: failed to get semaphore\n",
   15103 		    __func__);
   15104 		wm_put_swsm_semaphore(sc);
   15105 	}
   15106 
   15107 	return rv;
   15108 }
   15109 
   15110 static void
   15111 wm_put_nvm_82571(struct wm_softc *sc)
   15112 {
   15113 
   15114 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15115 		device_xname(sc->sc_dev), __func__));
   15116 
   15117 	switch (sc->sc_type) {
   15118 	case WM_T_82573:
   15119 		break;
   15120 	default:
   15121 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   15122 			wm_put_eecd(sc);
   15123 		break;
   15124 	}
   15125 
   15126 	wm_put_swsm_semaphore(sc);
   15127 }
   15128 
   15129 static int
   15130 wm_get_phy_82575(struct wm_softc *sc)
   15131 {
   15132 
   15133 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15134 		device_xname(sc->sc_dev), __func__));
   15135 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15136 }
   15137 
   15138 static void
   15139 wm_put_phy_82575(struct wm_softc *sc)
   15140 {
   15141 
   15142 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15143 		device_xname(sc->sc_dev), __func__));
   15144 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   15145 }
   15146 
   15147 static int
   15148 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   15149 {
   15150 	uint32_t ext_ctrl;
    15151 	int timeout;
   15152 
   15153 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15154 		device_xname(sc->sc_dev), __func__));
   15155 
   15156 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15157 	for (timeout = 0; timeout < 200; timeout++) {
   15158 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15159 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15160 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15161 
   15162 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15163 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15164 			return 0;
   15165 		delay(5000);
   15166 	}
   15167 	device_printf(sc->sc_dev,
   15168 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   15169 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15170 	return -1;
   15171 }
   15172 
   15173 static void
   15174 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   15175 {
   15176 	uint32_t ext_ctrl;
   15177 
   15178 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15179 		device_xname(sc->sc_dev), __func__));
   15180 
   15181 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15182 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15183 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15184 
   15185 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   15186 }
   15187 
   15188 static int
   15189 wm_get_swflag_ich8lan(struct wm_softc *sc)
   15190 {
   15191 	uint32_t ext_ctrl;
   15192 	int timeout;
   15193 
   15194 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15195 		device_xname(sc->sc_dev), __func__));
   15196 	mutex_enter(sc->sc_ich_phymtx);
   15197 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   15198 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15199 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   15200 			break;
   15201 		delay(1000);
   15202 	}
   15203 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   15204 		device_printf(sc->sc_dev,
   15205 		    "SW has already locked the resource\n");
   15206 		goto out;
   15207 	}
   15208 
   15209 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15210 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15211 	for (timeout = 0; timeout < 1000; timeout++) {
   15212 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15213 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   15214 			break;
   15215 		delay(1000);
   15216 	}
   15217 	if (timeout >= 1000) {
   15218 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   15219 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15220 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15221 		goto out;
   15222 	}
   15223 	return 0;
   15224 
   15225 out:
   15226 	mutex_exit(sc->sc_ich_phymtx);
   15227 	return -1;
   15228 }
   15229 
   15230 static void
   15231 wm_put_swflag_ich8lan(struct wm_softc *sc)
   15232 {
   15233 	uint32_t ext_ctrl;
   15234 
   15235 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15236 		device_xname(sc->sc_dev), __func__));
   15237 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   15238 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   15239 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15240 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   15241 	} else
   15242 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   15243 
   15244 	mutex_exit(sc->sc_ich_phymtx);
   15245 }
   15246 
   15247 static int
   15248 wm_get_nvm_ich8lan(struct wm_softc *sc)
   15249 {
   15250 
   15251 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15252 		device_xname(sc->sc_dev), __func__));
   15253 	mutex_enter(sc->sc_ich_nvmmtx);
   15254 
   15255 	return 0;
   15256 }
   15257 
   15258 static void
   15259 wm_put_nvm_ich8lan(struct wm_softc *sc)
   15260 {
   15261 
   15262 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15263 		device_xname(sc->sc_dev), __func__));
   15264 	mutex_exit(sc->sc_ich_nvmmtx);
   15265 }
   15266 
   15267 static int
   15268 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   15269 {
   15270 	int i = 0;
   15271 	uint32_t reg;
   15272 
   15273 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15274 		device_xname(sc->sc_dev), __func__));
   15275 
   15276 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15277 	do {
   15278 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   15279 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15280 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15281 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   15282 			break;
   15283 		delay(2*1000);
   15284 		i++;
   15285 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   15286 
   15287 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   15288 		wm_put_hw_semaphore_82573(sc);
   15289 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   15290 		    device_xname(sc->sc_dev));
   15291 		return -1;
   15292 	}
   15293 
   15294 	return 0;
   15295 }
   15296 
   15297 static void
   15298 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   15299 {
   15300 	uint32_t reg;
   15301 
   15302 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15303 		device_xname(sc->sc_dev), __func__));
   15304 
   15305 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15306 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   15307 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15308 }
   15309 
   15310 /*
   15311  * Management mode and power management related subroutines.
   15312  * BMC, AMT, suspend/resume and EEE.
   15313  */
   15314 
   15315 #ifdef WM_WOL
   15316 static int
   15317 wm_check_mng_mode(struct wm_softc *sc)
   15318 {
   15319 	int rv;
   15320 
   15321 	switch (sc->sc_type) {
   15322 	case WM_T_ICH8:
   15323 	case WM_T_ICH9:
   15324 	case WM_T_ICH10:
   15325 	case WM_T_PCH:
   15326 	case WM_T_PCH2:
   15327 	case WM_T_PCH_LPT:
   15328 	case WM_T_PCH_SPT:
   15329 	case WM_T_PCH_CNP:
   15330 		rv = wm_check_mng_mode_ich8lan(sc);
   15331 		break;
   15332 	case WM_T_82574:
   15333 	case WM_T_82583:
   15334 		rv = wm_check_mng_mode_82574(sc);
   15335 		break;
   15336 	case WM_T_82571:
   15337 	case WM_T_82572:
   15338 	case WM_T_82573:
   15339 	case WM_T_80003:
   15340 		rv = wm_check_mng_mode_generic(sc);
   15341 		break;
   15342 	default:
    15343 		/* Nothing to do */
   15344 		rv = 0;
   15345 		break;
   15346 	}
   15347 
   15348 	return rv;
   15349 }
   15350 
   15351 static int
   15352 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   15353 {
   15354 	uint32_t fwsm;
   15355 
   15356 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15357 
   15358 	if (((fwsm & FWSM_FW_VALID) != 0)
   15359 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15360 		return 1;
   15361 
   15362 	return 0;
   15363 }
   15364 
   15365 static int
   15366 wm_check_mng_mode_82574(struct wm_softc *sc)
   15367 {
   15368 	uint16_t data;
   15369 
   15370 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15371 
   15372 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   15373 		return 1;
   15374 
   15375 	return 0;
   15376 }
   15377 
   15378 static int
   15379 wm_check_mng_mode_generic(struct wm_softc *sc)
   15380 {
   15381 	uint32_t fwsm;
   15382 
   15383 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15384 
   15385 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   15386 		return 1;
   15387 
   15388 	return 0;
   15389 }
   15390 #endif /* WM_WOL */
   15391 
   15392 static int
   15393 wm_enable_mng_pass_thru(struct wm_softc *sc)
   15394 {
   15395 	uint32_t manc, fwsm, factps;
   15396 
   15397 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   15398 		return 0;
   15399 
   15400 	manc = CSR_READ(sc, WMREG_MANC);
   15401 
   15402 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   15403 		device_xname(sc->sc_dev), manc));
   15404 	if ((manc & MANC_RECV_TCO_EN) == 0)
   15405 		return 0;
   15406 
   15407 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   15408 		fwsm = CSR_READ(sc, WMREG_FWSM);
   15409 		factps = CSR_READ(sc, WMREG_FACTPS);
   15410 		if (((factps & FACTPS_MNGCG) == 0)
   15411 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   15412 			return 1;
   15413 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   15414 		uint16_t data;
   15415 
   15416 		factps = CSR_READ(sc, WMREG_FACTPS);
   15417 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15418 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15419 			device_xname(sc->sc_dev), factps, data));
   15420 		if (((factps & FACTPS_MNGCG) == 0)
   15421 		    && ((data & NVM_CFG2_MNGM_MASK)
   15422 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15423 			return 1;
   15424 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15425 	    && ((manc & MANC_ASF_EN) == 0))
   15426 		return 1;
   15427 
   15428 	return 0;
   15429 }
   15430 
   15431 static bool
   15432 wm_phy_resetisblocked(struct wm_softc *sc)
   15433 {
   15434 	bool blocked = false;
   15435 	uint32_t reg;
   15436 	int i = 0;
   15437 
   15438 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15439 		device_xname(sc->sc_dev), __func__));
   15440 
   15441 	switch (sc->sc_type) {
   15442 	case WM_T_ICH8:
   15443 	case WM_T_ICH9:
   15444 	case WM_T_ICH10:
   15445 	case WM_T_PCH:
   15446 	case WM_T_PCH2:
   15447 	case WM_T_PCH_LPT:
   15448 	case WM_T_PCH_SPT:
   15449 	case WM_T_PCH_CNP:
   15450 		do {
   15451 			reg = CSR_READ(sc, WMREG_FWSM);
   15452 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15453 				blocked = true;
   15454 				delay(10*1000);
   15455 				continue;
   15456 			}
   15457 			blocked = false;
   15458 		} while (blocked && (i++ < 30));
    15459 		return blocked;
   15461 	case WM_T_82571:
   15462 	case WM_T_82572:
   15463 	case WM_T_82573:
   15464 	case WM_T_82574:
   15465 	case WM_T_82583:
   15466 	case WM_T_80003:
   15467 		reg = CSR_READ(sc, WMREG_MANC);
    15468 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   15473 	default:
   15474 		/* No problem */
   15475 		break;
   15476 	}
   15477 
   15478 	return false;
   15479 }
   15480 
   15481 static void
   15482 wm_get_hw_control(struct wm_softc *sc)
   15483 {
   15484 	uint32_t reg;
   15485 
   15486 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15487 		device_xname(sc->sc_dev), __func__));
   15488 
   15489 	if (sc->sc_type == WM_T_82573) {
   15490 		reg = CSR_READ(sc, WMREG_SWSM);
   15491 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15492 	} else if (sc->sc_type >= WM_T_82571) {
   15493 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15494 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15495 	}
   15496 }
   15497 
   15498 static void
   15499 wm_release_hw_control(struct wm_softc *sc)
   15500 {
   15501 	uint32_t reg;
   15502 
   15503 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15504 		device_xname(sc->sc_dev), __func__));
   15505 
   15506 	if (sc->sc_type == WM_T_82573) {
   15507 		reg = CSR_READ(sc, WMREG_SWSM);
   15508 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15509 	} else if (sc->sc_type >= WM_T_82571) {
   15510 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15511 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15512 	}
   15513 }
   15514 
   15515 static void
   15516 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15517 {
   15518 	uint32_t reg;
   15519 
   15520 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15521 		device_xname(sc->sc_dev), __func__));
   15522 
   15523 	if (sc->sc_type < WM_T_PCH2)
   15524 		return;
   15525 
   15526 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15527 
   15528 	if (gate)
   15529 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15530 	else
   15531 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15532 
   15533 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15534 }
   15535 
   15536 static int
   15537 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15538 {
   15539 	uint32_t fwsm, reg;
   15540 	int rv;
   15541 
   15542 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15543 		device_xname(sc->sc_dev), __func__));
   15544 
   15545 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15546 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15547 
   15548 	/* Disable ULP */
   15549 	wm_ulp_disable(sc);
   15550 
   15551 	/* Acquire PHY semaphore */
   15552 	rv = sc->phy.acquire(sc);
   15553 	if (rv != 0) {
   15554 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15555 		device_xname(sc->sc_dev), __func__));
   15556 		return rv;
   15557 	}
   15558 
   15559 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15560 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15561 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15562 	 */
   15563 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15564 	switch (sc->sc_type) {
   15565 	case WM_T_PCH_LPT:
   15566 	case WM_T_PCH_SPT:
   15567 	case WM_T_PCH_CNP:
   15568 		if (wm_phy_is_accessible_pchlan(sc))
   15569 			break;
   15570 
   15571 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15572 		 * forcing MAC to SMBus mode first.
   15573 		 */
   15574 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15575 		reg |= CTRL_EXT_FORCE_SMBUS;
   15576 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15577 #if 0
   15578 		/* XXX Isn't this required??? */
   15579 		CSR_WRITE_FLUSH(sc);
   15580 #endif
   15581 		/* Wait 50 milliseconds for MAC to finish any retries
   15582 		 * that it might be trying to perform from previous
   15583 		 * attempts to acknowledge any phy read requests.
   15584 		 */
   15585 		delay(50 * 1000);
   15586 		/* FALLTHROUGH */
   15587 	case WM_T_PCH2:
   15588 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15589 			break;
   15590 		/* FALLTHROUGH */
   15591 	case WM_T_PCH:
   15592 		if (sc->sc_type == WM_T_PCH)
   15593 			if ((fwsm & FWSM_FW_VALID) != 0)
   15594 				break;
   15595 
   15596 		if (wm_phy_resetisblocked(sc) == true) {
   15597 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
   15598 			break;
   15599 		}
   15600 
   15601 		/* Toggle LANPHYPC Value bit */
   15602 		wm_toggle_lanphypc_pch_lpt(sc);
   15603 
   15604 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15605 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15606 				break;
   15607 
   15608 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15609 			 * so ensure that the MAC is also out of SMBus mode
   15610 			 */
   15611 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15612 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15613 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15614 
   15615 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15616 				break;
   15617 			rv = -1;
   15618 		}
   15619 		break;
   15620 	default:
   15621 		break;
   15622 	}
   15623 
   15624 	/* Release semaphore */
   15625 	sc->phy.release(sc);
   15626 
   15627 	if (rv == 0) {
   15628 		/* Check to see if able to reset PHY.  Print error if not */
   15629 		if (wm_phy_resetisblocked(sc)) {
   15630 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15631 			goto out;
   15632 		}
   15633 
   15634 		/* Reset the PHY before any access to it.  Doing so, ensures
   15635 		 * that the PHY is in a known good state before we read/write
   15636 		 * PHY registers.  The generic reset is sufficient here,
   15637 		 * because we haven't determined the PHY type yet.
   15638 		 */
   15639 		if (wm_reset_phy(sc) != 0)
   15640 			goto out;
   15641 
    15642 		/* On a successful reset, possibly need to wait for the
    15643 		 * PHY to quiesce to an accessible state before returning
    15644 		 * control to the calling function.  If the PHY does not
    15645 		 * quiesce, warn that the reset is still blocked, since
    15646 		 * that is the condition the PHY is in.
    15647 		 */
   15648 		if (wm_phy_resetisblocked(sc))
   15649 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15650 	}
   15651 
   15652 out:
   15653 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15654 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15655 		delay(10*1000);
   15656 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15657 	}
   15658 
   15659 	return 0;
   15660 }
   15661 
   15662 static void
   15663 wm_init_manageability(struct wm_softc *sc)
   15664 {
   15665 
   15666 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15667 		device_xname(sc->sc_dev), __func__));
   15668 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
   15669 
   15670 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15671 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15672 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15673 
   15674 		/* Disable hardware interception of ARP */
   15675 		manc &= ~MANC_ARP_EN;
   15676 
   15677 		/* Enable receiving management packets to the host */
   15678 		if (sc->sc_type >= WM_T_82571) {
   15679 			manc |= MANC_EN_MNG2HOST;
   15680 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15681 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15682 		}
   15683 
   15684 		CSR_WRITE(sc, WMREG_MANC, manc);
   15685 	}
   15686 }
   15687 
   15688 static void
   15689 wm_release_manageability(struct wm_softc *sc)
   15690 {
   15691 
   15692 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15693 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15694 
   15695 		manc |= MANC_ARP_EN;
   15696 		if (sc->sc_type >= WM_T_82571)
   15697 			manc &= ~MANC_EN_MNG2HOST;
   15698 
   15699 		CSR_WRITE(sc, WMREG_MANC, manc);
   15700 	}
   15701 }
   15702 
   15703 static void
   15704 wm_get_wakeup(struct wm_softc *sc)
   15705 {
   15706 
   15707 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15708 	switch (sc->sc_type) {
   15709 	case WM_T_82573:
   15710 	case WM_T_82583:
   15711 		sc->sc_flags |= WM_F_HAS_AMT;
   15712 		/* FALLTHROUGH */
   15713 	case WM_T_80003:
   15714 	case WM_T_82575:
   15715 	case WM_T_82576:
   15716 	case WM_T_82580:
   15717 	case WM_T_I350:
   15718 	case WM_T_I354:
   15719 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15720 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15721 		/* FALLTHROUGH */
   15722 	case WM_T_82541:
   15723 	case WM_T_82541_2:
   15724 	case WM_T_82547:
   15725 	case WM_T_82547_2:
   15726 	case WM_T_82571:
   15727 	case WM_T_82572:
   15728 	case WM_T_82574:
   15729 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15730 		break;
   15731 	case WM_T_ICH8:
   15732 	case WM_T_ICH9:
   15733 	case WM_T_ICH10:
   15734 	case WM_T_PCH:
   15735 	case WM_T_PCH2:
   15736 	case WM_T_PCH_LPT:
   15737 	case WM_T_PCH_SPT:
   15738 	case WM_T_PCH_CNP:
   15739 		sc->sc_flags |= WM_F_HAS_AMT;
   15740 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15741 		break;
   15742 	default:
   15743 		break;
   15744 	}
   15745 
   15746 	/* 1: HAS_MANAGE */
   15747 	if (wm_enable_mng_pass_thru(sc) != 0)
   15748 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15749 
   15750 	/*
    15751 	 * Note that the WOL flags are set after the EEPROM stuff is
    15752 	 * reset.
   15753 	 */
   15754 }
   15755 
   15756 /*
   15757  * Unconfigure Ultra Low Power mode.
   15758  * Only for I217 and newer (see below).
   15759  */
   15760 static int
   15761 wm_ulp_disable(struct wm_softc *sc)
   15762 {
   15763 	uint32_t reg;
   15764 	uint16_t phyreg;
   15765 	int i = 0, rv;
   15766 
   15767 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15768 		device_xname(sc->sc_dev), __func__));
   15769 	/* Exclude old devices */
   15770 	if ((sc->sc_type < WM_T_PCH_LPT)
   15771 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15772 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15773 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15774 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15775 		return 0;
   15776 
   15777 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15778 		/* Request ME un-configure ULP mode in the PHY */
   15779 		reg = CSR_READ(sc, WMREG_H2ME);
   15780 		reg &= ~H2ME_ULP;
   15781 		reg |= H2ME_ENFORCE_SETTINGS;
   15782 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15783 
   15784 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15785 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15786 			if (i++ == 30) {
   15787 				device_printf(sc->sc_dev, "%s timed out\n",
   15788 				    __func__);
   15789 				return -1;
   15790 			}
   15791 			delay(10 * 1000);
   15792 		}
   15793 		reg = CSR_READ(sc, WMREG_H2ME);
   15794 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15795 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15796 
   15797 		return 0;
   15798 	}
   15799 
   15800 	/* Acquire semaphore */
   15801 	rv = sc->phy.acquire(sc);
   15802 	if (rv != 0) {
   15803 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15804 		device_xname(sc->sc_dev), __func__));
   15805 		return rv;
   15806 	}
   15807 
   15808 	/* Toggle LANPHYPC */
   15809 	wm_toggle_lanphypc_pch_lpt(sc);
   15810 
   15811 	/* Unforce SMBus mode in PHY */
   15812 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15813 	if (rv != 0) {
   15814 		uint32_t reg2;
   15815 
   15816 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15817 			__func__);
   15818 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15819 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15820 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15821 		delay(50 * 1000);
   15822 
   15823 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15824 		    &phyreg);
   15825 		if (rv != 0)
   15826 			goto release;
   15827 	}
   15828 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15829 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15830 
   15831 	/* Unforce SMBus mode in MAC */
   15832 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15833 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15834 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15835 
   15836 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15837 	if (rv != 0)
   15838 		goto release;
   15839 	phyreg |= HV_PM_CTRL_K1_ENA;
   15840 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15841 
   15842 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15843 		&phyreg);
   15844 	if (rv != 0)
   15845 		goto release;
   15846 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15847 	    | I218_ULP_CONFIG1_STICKY_ULP
   15848 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15849 	    | I218_ULP_CONFIG1_WOL_HOST
   15850 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15851 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15852 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15853 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15854 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15855 	phyreg |= I218_ULP_CONFIG1_START;
   15856 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15857 
   15858 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15859 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15860 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15861 
   15862 release:
   15863 	/* Release semaphore */
   15864 	sc->phy.release(sc);
   15865 	wm_gmii_reset(sc);
   15866 	delay(50 * 1000);
   15867 
   15868 	return rv;
   15869 }
   15870 
   15871 /* WOL in the newer chipset interfaces (pchlan) */
   15872 static int
   15873 wm_enable_phy_wakeup(struct wm_softc *sc)
   15874 {
   15875 	device_t dev = sc->sc_dev;
   15876 	uint32_t mreg, moff;
   15877 	uint16_t wuce, wuc, wufc, preg;
   15878 	int i, rv;
   15879 
   15880 	KASSERT(sc->sc_type >= WM_T_PCH);
   15881 
   15882 	/* Copy MAC RARs to PHY RARs */
   15883 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15884 
   15885 	/* Activate PHY wakeup */
   15886 	rv = sc->phy.acquire(sc);
   15887 	if (rv != 0) {
   15888 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15889 		    __func__);
   15890 		return rv;
   15891 	}
   15892 
   15893 	/*
   15894 	 * Enable access to PHY wakeup registers.
   15895 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15896 	 */
   15897 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15898 	if (rv != 0) {
   15899 		device_printf(dev,
   15900 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15901 		goto release;
   15902 	}
   15903 
   15904 	/* Copy MAC MTA to PHY MTA */
   15905 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15906 		uint16_t lo, hi;
   15907 
   15908 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15909 		lo = (uint16_t)(mreg & 0xffff);
   15910 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15911 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15912 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15913 	}
   15914 
   15915 	/* Configure PHY Rx Control register */
   15916 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15917 	mreg = CSR_READ(sc, WMREG_RCTL);
   15918 	if (mreg & RCTL_UPE)
   15919 		preg |= BM_RCTL_UPE;
   15920 	if (mreg & RCTL_MPE)
   15921 		preg |= BM_RCTL_MPE;
   15922 	preg &= ~(BM_RCTL_MO_MASK);
   15923 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15924 	if (moff != 0)
   15925 		preg |= moff << BM_RCTL_MO_SHIFT;
   15926 	if (mreg & RCTL_BAM)
   15927 		preg |= BM_RCTL_BAM;
   15928 	if (mreg & RCTL_PMCF)
   15929 		preg |= BM_RCTL_PMCF;
   15930 	mreg = CSR_READ(sc, WMREG_CTRL);
   15931 	if (mreg & CTRL_RFCE)
   15932 		preg |= BM_RCTL_RFCE;
   15933 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15934 
   15935 	wuc = WUC_APME | WUC_PME_EN;
   15936 	wufc = WUFC_MAG;
   15937 	/* Enable PHY wakeup in MAC register */
   15938 	CSR_WRITE(sc, WMREG_WUC,
   15939 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15940 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15941 
   15942 	/* Configure and enable PHY wakeup in PHY registers */
   15943 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15944 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15945 
   15946 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15947 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15948 
   15949 release:
   15950 	sc->phy.release(sc);
   15951 
    15952 	return rv;
   15953 }
   15954 
   15955 /* Power down workaround on D3 */
   15956 static void
   15957 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15958 {
   15959 	uint32_t reg;
   15960 	uint16_t phyreg;
   15961 	int i;
   15962 
   15963 	for (i = 0; i < 2; i++) {
   15964 		/* Disable link */
   15965 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15966 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15967 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15968 
   15969 		/*
   15970 		 * Call gig speed drop workaround on Gig disable before
   15971 		 * accessing any PHY registers
   15972 		 */
   15973 		if (sc->sc_type == WM_T_ICH8)
   15974 			wm_gig_downshift_workaround_ich8lan(sc);
   15975 
   15976 		/* Write VR power-down enable */
   15977 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15978 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15979 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15980 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15981 
   15982 		/* Read it back and test */
   15983 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15984 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15985 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15986 			break;
   15987 
   15988 		/* Issue PHY reset and repeat at most one more time */
   15989 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15990 	}
   15991 }
   15992 
   15993 /*
   15994  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15995  *  @sc: pointer to the HW structure
   15996  *
   15997  *  During S0 to Sx transition, it is possible the link remains at gig
   15998  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15999  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   16000  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   16001  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   16002  *  needs to be written.
 *  Parts that support (and are linked to a partner which supports) EEE in
   16004  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   16005  *  than 10Mbps w/o EEE.
   16006  */
   16007 static void
   16008 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   16009 {
   16010 	device_t dev = sc->sc_dev;
   16011 	struct ethercom *ec = &sc->sc_ethercom;
   16012 	uint32_t phy_ctrl;
   16013 	int rv;
   16014 
   16015 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   16016 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   16017 
   16018 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   16019 
   16020 	if (sc->sc_phytype == WMPHY_I217) {
   16021 		uint16_t devid = sc->sc_pcidevid;
   16022 
   16023 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   16024 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   16025 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   16026 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   16027 		    (sc->sc_type >= WM_T_PCH_SPT))
   16028 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   16029 			    CSR_READ(sc, WMREG_FEXTNVM6)
   16030 			    & ~FEXTNVM6_REQ_PLL_CLK);
   16031 
   16032 		if (sc->phy.acquire(sc) != 0)
   16033 			goto out;
   16034 
   16035 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16036 			uint16_t eee_advert;
   16037 
   16038 			rv = wm_read_emi_reg_locked(dev,
   16039 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   16040 			if (rv)
   16041 				goto release;
   16042 
   16043 			/*
   16044 			 * Disable LPLU if both link partners support 100BaseT
   16045 			 * EEE and 100Full is advertised on both ends of the
   16046 			 * link, and enable Auto Enable LPI since there will
   16047 			 * be no driver to enable LPI while in Sx.
   16048 			 */
   16049 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   16050 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   16051 				uint16_t anar, phy_reg;
   16052 
   16053 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   16054 				    &anar);
   16055 				if (anar & ANAR_TX_FD) {
   16056 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   16057 					    PHY_CTRL_NOND0A_LPLU);
   16058 
   16059 					/* Set Auto Enable LPI after link up */
   16060 					sc->phy.readreg_locked(dev, 2,
   16061 					    I217_LPI_GPIO_CTRL, &phy_reg);
   16062 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16063 					sc->phy.writereg_locked(dev, 2,
   16064 					    I217_LPI_GPIO_CTRL, phy_reg);
   16065 				}
   16066 			}
   16067 		}
   16068 
   16069 		/*
   16070 		 * For i217 Intel Rapid Start Technology support,
   16071 		 * when the system is going into Sx and no manageability engine
   16072 		 * is present, the driver must configure proxy to reset only on
	 * power good.  LPI (Low Power Idle) state must also reset only
   16074 		 * on power good, as well as the MTA (Multicast table array).
   16075 		 * The SMBus release must also be disabled on LCD reset.
   16076 		 */
   16077 
   16078 		/*
   16079 		 * Enable MTA to reset for Intel Rapid Start Technology
   16080 		 * Support
   16081 		 */
   16082 
   16083 release:
   16084 		sc->phy.release(sc);
   16085 	}
   16086 out:
   16087 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   16088 
   16089 	if (sc->sc_type == WM_T_ICH8)
   16090 		wm_gig_downshift_workaround_ich8lan(sc);
   16091 
   16092 	if (sc->sc_type >= WM_T_PCH) {
   16093 		wm_oem_bits_config_ich8lan(sc, false);
   16094 
   16095 		/* Reset PHY to activate OEM bits on 82577/8 */
   16096 		if (sc->sc_type == WM_T_PCH)
   16097 			wm_reset_phy(sc);
   16098 
   16099 		if (sc->phy.acquire(sc) != 0)
   16100 			return;
   16101 		wm_write_smbus_addr(sc);
   16102 		sc->phy.release(sc);
   16103 	}
   16104 }
   16105 
   16106 /*
   16107  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   16108  *  @sc: pointer to the HW structure
   16109  *
   16110  *  During Sx to S0 transitions on non-managed devices or managed devices
   16111  *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   16113  *  the PHY.
   16114  *  On i217, setup Intel Rapid Start Technology.
   16115  */
   16116 static int
   16117 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   16118 {
   16119 	device_t dev = sc->sc_dev;
   16120 	int rv;
   16121 
   16122 	if (sc->sc_type < WM_T_PCH2)
   16123 		return 0;
   16124 
   16125 	rv = wm_init_phy_workarounds_pchlan(sc);
   16126 	if (rv != 0)
   16127 		return rv;
   16128 
   16129 	/* For i217 Intel Rapid Start Technology support when the system
	 * is transitioning from Sx and no manageability engine is present,
   16131 	 * configure SMBus to restore on reset, disable proxy, and enable
   16132 	 * the reset on MTA (Multicast table array).
   16133 	 */
   16134 	if (sc->sc_phytype == WMPHY_I217) {
   16135 		uint16_t phy_reg;
   16136 
   16137 		rv = sc->phy.acquire(sc);
   16138 		if (rv != 0)
   16139 			return rv;
   16140 
   16141 		/* Clear Auto Enable LPI after link up */
   16142 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   16143 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   16144 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   16145 
   16146 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16147 			/* Restore clear on SMB if no manageability engine
   16148 			 * is present
   16149 			 */
   16150 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   16151 			    &phy_reg);
   16152 			if (rv != 0)
   16153 				goto release;
   16154 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   16155 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   16156 
   16157 			/* Disable Proxy */
   16158 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   16159 		}
   16160 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   16162 		if (rv != 0)
   16163 			goto release;
   16164 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   16165 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   16166 
   16167 release:
   16168 		sc->phy.release(sc);
   16169 		return rv;
   16170 	}
   16171 
   16172 	return 0;
   16173 }
   16174 
   16175 static void
   16176 wm_enable_wakeup(struct wm_softc *sc)
   16177 {
   16178 	uint32_t reg, pmreg;
   16179 	pcireg_t pmode;
   16180 	int rv = 0;
   16181 
   16182 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16183 		device_xname(sc->sc_dev), __func__));
   16184 
   16185 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16186 	    &pmreg, NULL) == 0)
   16187 		return;
   16188 
   16189 	if ((sc->sc_flags & WM_F_WOL) == 0)
   16190 		goto pme;
   16191 
   16192 	/* Advertise the wakeup capability */
   16193 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   16194 	    | CTRL_SWDPIN(3));
   16195 
   16196 	/* Keep the laser running on fiber adapters */
   16197 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   16198 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   16199 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16200 		reg |= CTRL_EXT_SWDPIN(3);
   16201 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16202 	}
   16203 
   16204 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   16205 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   16206 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   16207 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   16208 		wm_suspend_workarounds_ich8lan(sc);
   16209 
   16210 #if 0	/* For the multicast packet */
   16211 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   16212 	reg |= WUFC_MC;
   16213 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   16214 #endif
   16215 
   16216 	if (sc->sc_type >= WM_T_PCH) {
   16217 		rv = wm_enable_phy_wakeup(sc);
   16218 		if (rv != 0)
   16219 			goto pme;
   16220 	} else {
   16221 		/* Enable wakeup by the MAC */
   16222 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   16223 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   16224 	}
   16225 
   16226 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   16227 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   16228 		|| (sc->sc_type == WM_T_PCH2))
   16229 	    && (sc->sc_phytype == WMPHY_IGP_3))
   16230 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   16231 
   16232 pme:
   16233 	/* Request PME */
   16234 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   16235 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   16236 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   16237 		/* For WOL */
   16238 		pmode |= PCI_PMCSR_PME_EN;
   16239 	} else {
   16240 		/* Disable WOL */
   16241 		pmode &= ~PCI_PMCSR_PME_EN;
   16242 	}
   16243 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   16244 }
   16245 
   16246 /* Disable ASPM L0s and/or L1 for workaround */
   16247 static void
   16248 wm_disable_aspm(struct wm_softc *sc)
   16249 {
   16250 	pcireg_t reg, mask = 0;
	const char *str = "";
   16252 
   16253 	/*
	 * Only for PCIe devices which have the PCIe capability in the PCI
	 * config space.
   16256 	 */
   16257 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   16258 		return;
   16259 
   16260 	switch (sc->sc_type) {
   16261 	case WM_T_82571:
   16262 	case WM_T_82572:
   16263 		/*
   16264 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   16265 		 * State Power management L1 State (ASPM L1).
   16266 		 */
   16267 		mask = PCIE_LCSR_ASPM_L1;
   16268 		str = "L1 is";
   16269 		break;
   16270 	case WM_T_82573:
   16271 	case WM_T_82574:
   16272 	case WM_T_82583:
   16273 		/*
   16274 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   16275 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The documentation for the 82574 and 82583
		 * says that disabling L0s with some specific chipsets is
		 * sufficient, but we follow what the Intel em driver does.
   16280 		 *
   16281 		 * References:
   16282 		 * Errata 8 of the Specification Update of i82573.
   16283 		 * Errata 20 of the Specification Update of i82574.
   16284 		 * Errata 9 of the Specification Update of i82583.
   16285 		 */
   16286 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   16287 		str = "L0s and L1 are";
   16288 		break;
   16289 	default:
   16290 		return;
   16291 	}
   16292 
   16293 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16294 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   16295 	reg &= ~mask;
   16296 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16297 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   16298 
   16299 	/* Print only in wm_attach() */
   16300 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   16301 		aprint_verbose_dev(sc->sc_dev,
   16302 		    "ASPM %s disabled to workaround the errata.\n", str);
   16303 }
   16304 
/* LPLU (Low Power Link Up) */
   16306 
   16307 static void
   16308 wm_lplu_d0_disable(struct wm_softc *sc)
   16309 {
   16310 	struct mii_data *mii = &sc->sc_mii;
   16311 	uint32_t reg;
   16312 	uint16_t phyval;
   16313 
   16314 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16315 		device_xname(sc->sc_dev), __func__));
   16316 
   16317 	if (sc->sc_phytype == WMPHY_IFE)
   16318 		return;
   16319 
   16320 	switch (sc->sc_type) {
   16321 	case WM_T_82571:
   16322 	case WM_T_82572:
   16323 	case WM_T_82573:
   16324 	case WM_T_82575:
   16325 	case WM_T_82576:
   16326 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   16327 		phyval &= ~PMR_D0_LPLU;
   16328 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   16329 		break;
   16330 	case WM_T_82580:
   16331 	case WM_T_I350:
   16332 	case WM_T_I210:
   16333 	case WM_T_I211:
   16334 		reg = CSR_READ(sc, WMREG_PHPM);
   16335 		reg &= ~PHPM_D0A_LPLU;
   16336 		CSR_WRITE(sc, WMREG_PHPM, reg);
   16337 		break;
   16338 	case WM_T_82574:
   16339 	case WM_T_82583:
   16340 	case WM_T_ICH8:
   16341 	case WM_T_ICH9:
   16342 	case WM_T_ICH10:
   16343 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16344 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   16345 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16346 		CSR_WRITE_FLUSH(sc);
   16347 		break;
   16348 	case WM_T_PCH:
   16349 	case WM_T_PCH2:
   16350 	case WM_T_PCH_LPT:
   16351 	case WM_T_PCH_SPT:
   16352 	case WM_T_PCH_CNP:
   16353 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   16354 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   16355 		if (wm_phy_resetisblocked(sc) == false)
   16356 			phyval |= HV_OEM_BITS_ANEGNOW;
   16357 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   16358 		break;
   16359 	default:
   16360 		break;
   16361 	}
   16362 }
   16363 
   16364 /* EEE */
   16365 
   16366 static int
   16367 wm_set_eee_i350(struct wm_softc *sc)
   16368 {
   16369 	struct ethercom *ec = &sc->sc_ethercom;
   16370 	uint32_t ipcnfg, eeer;
   16371 	uint32_t ipcnfg_mask
   16372 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   16373 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   16374 
   16375 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   16376 
   16377 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   16378 	eeer = CSR_READ(sc, WMREG_EEER);
   16379 
   16380 	/* Enable or disable per user setting */
   16381 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16382 		ipcnfg |= ipcnfg_mask;
   16383 		eeer |= eeer_mask;
   16384 	} else {
   16385 		ipcnfg &= ~ipcnfg_mask;
   16386 		eeer &= ~eeer_mask;
   16387 	}
   16388 
   16389 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   16390 	CSR_WRITE(sc, WMREG_EEER, eeer);
   16391 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   16392 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   16393 
   16394 	return 0;
   16395 }
   16396 
   16397 static int
   16398 wm_set_eee_pchlan(struct wm_softc *sc)
   16399 {
   16400 	device_t dev = sc->sc_dev;
   16401 	struct ethercom *ec = &sc->sc_ethercom;
   16402 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   16403 	int rv;
   16404 
   16405 	switch (sc->sc_phytype) {
   16406 	case WMPHY_82579:
   16407 		lpa = I82579_EEE_LP_ABILITY;
   16408 		pcs_status = I82579_EEE_PCS_STATUS;
   16409 		adv_addr = I82579_EEE_ADVERTISEMENT;
   16410 		break;
   16411 	case WMPHY_I217:
   16412 		lpa = I217_EEE_LP_ABILITY;
   16413 		pcs_status = I217_EEE_PCS_STATUS;
   16414 		adv_addr = I217_EEE_ADVERTISEMENT;
   16415 		break;
   16416 	default:
   16417 		return 0;
   16418 	}
   16419 
   16420 	rv = sc->phy.acquire(sc);
   16421 	if (rv != 0) {
   16422 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16423 		return rv;
   16424 	}
   16425 
   16426 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16427 	if (rv != 0)
   16428 		goto release;
   16429 
   16430 	/* Clear bits that enable EEE in various speeds */
   16431 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16432 
   16433 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16434 		/* Save off link partner's EEE ability */
   16435 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16436 		if (rv != 0)
   16437 			goto release;
   16438 
   16439 		/* Read EEE advertisement */
   16440 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16441 			goto release;
   16442 
   16443 		/*
   16444 		 * Enable EEE only for speeds in which the link partner is
   16445 		 * EEE capable and for which we advertise EEE.
   16446 		 */
   16447 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16448 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16449 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16450 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16451 			if ((data & ANLPAR_TX_FD) != 0)
   16452 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16453 			else {
   16454 				/*
   16455 				 * EEE is not supported in 100Half, so ignore
   16456 				 * partner's EEE in 100 ability if full-duplex
   16457 				 * is not advertised.
   16458 				 */
   16459 				sc->eee_lp_ability
   16460 				    &= ~AN_EEEADVERT_100_TX;
   16461 			}
   16462 		}
   16463 	}
   16464 
   16465 	if (sc->sc_phytype == WMPHY_82579) {
   16466 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16467 		if (rv != 0)
   16468 			goto release;
   16469 
   16470 		data &= ~I82579_LPI_PLL_SHUT_100;
   16471 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16472 	}
   16473 
   16474 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16475 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16476 		goto release;
   16477 
   16478 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16479 release:
   16480 	sc->phy.release(sc);
   16481 
   16482 	return rv;
   16483 }
   16484 
   16485 static int
   16486 wm_set_eee(struct wm_softc *sc)
   16487 {
   16488 	struct ethercom *ec = &sc->sc_ethercom;
   16489 
   16490 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16491 		return 0;
   16492 
   16493 	if (sc->sc_type == WM_T_I354) {
   16494 		/* I354 uses an external PHY */
   16495 		return 0; /* not yet */
   16496 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16497 		return wm_set_eee_i350(sc);
   16498 	else if (sc->sc_type >= WM_T_PCH2)
   16499 		return wm_set_eee_pchlan(sc);
   16500 
   16501 	return 0;
   16502 }
   16503 
   16504 /*
   16505  * Workarounds (mainly PHY related).
   16506  * Basically, PHY's workarounds are in the PHY drivers.
   16507  */
   16508 
   16509 /* Workaround for 82566 Kumeran PCS lock loss */
   16510 static int
   16511 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16512 {
   16513 	struct mii_data *mii = &sc->sc_mii;
   16514 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16515 	int i, reg, rv;
   16516 	uint16_t phyreg;
   16517 
   16518 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16519 		device_xname(sc->sc_dev), __func__));
   16520 
   16521 	/* If the link is not up, do nothing */
   16522 	if ((status & STATUS_LU) == 0)
   16523 		return 0;
   16524 
	/* Nothing to do if the link speed is not 1Gbps */
   16526 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16527 		return 0;
   16528 
   16529 	for (i = 0; i < 10; i++) {
   16530 		/* read twice */
   16531 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16532 		if (rv != 0)
   16533 			return rv;
   16534 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16535 		if (rv != 0)
   16536 			return rv;
   16537 
   16538 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16539 			goto out;	/* GOOD! */
   16540 
   16541 		/* Reset the PHY */
   16542 		wm_reset_phy(sc);
   16543 		delay(5*1000);
   16544 	}
   16545 
   16546 	/* Disable GigE link negotiation */
   16547 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16548 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16549 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16550 
   16551 	/*
   16552 	 * Call gig speed drop workaround on Gig disable before accessing
   16553 	 * any PHY registers.
   16554 	 */
   16555 	wm_gig_downshift_workaround_ich8lan(sc);
   16556 
   16557 out:
   16558 	return 0;
   16559 }
   16560 
   16561 /*
   16562  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16563  *  @sc: pointer to the HW structure
   16564  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16566  *  LPLU, Gig disable, MDIC PHY reset):
   16567  *    1) Set Kumeran Near-end loopback
   16568  *    2) Clear Kumeran Near-end loopback
   16569  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16570  */
   16571 static void
   16572 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16573 {
   16574 	uint16_t kmreg;
   16575 
   16576 	/* Only for igp3 */
   16577 	if (sc->sc_phytype == WMPHY_IGP_3) {
   16578 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16579 			return;
   16580 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16581 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16582 			return;
   16583 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16584 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16585 	}
   16586 }
   16587 
   16588 /*
   16589  * Workaround for pch's PHYs
   16590  * XXX should be moved to new PHY driver?
   16591  */
   16592 static int
   16593 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16594 {
   16595 	device_t dev = sc->sc_dev;
   16596 	struct mii_data *mii = &sc->sc_mii;
   16597 	struct mii_softc *child;
   16598 	uint16_t phy_data, phyrev = 0;
   16599 	int phytype = sc->sc_phytype;
   16600 	int rv;
   16601 
   16602 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16603 		device_xname(dev), __func__));
   16604 	KASSERT(sc->sc_type == WM_T_PCH);
   16605 
   16606 	/* Set MDIO slow mode before any other MDIO access */
   16607 	if (phytype == WMPHY_82577)
   16608 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16609 			return rv;
   16610 
   16611 	child = LIST_FIRST(&mii->mii_phys);
   16612 	if (child != NULL)
   16613 		phyrev = child->mii_mpd_rev;
   16614 
	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   16616 	if ((child != NULL) &&
   16617 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16618 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16619 		/* Disable generation of early preamble (0x4431) */
   16620 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16621 		    &phy_data);
   16622 		if (rv != 0)
   16623 			return rv;
   16624 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16625 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16626 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16627 		    phy_data);
   16628 		if (rv != 0)
   16629 			return rv;
   16630 
   16631 		/* Preamble tuning for SSC */
   16632 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16633 		if (rv != 0)
   16634 			return rv;
   16635 	}
   16636 
   16637 	/* 82578 */
   16638 	if (phytype == WMPHY_82578) {
   16639 		/*
   16640 		 * Return registers to default by doing a soft reset then
   16641 		 * writing 0x3140 to the control register
   16642 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16643 		 */
   16644 		if ((child != NULL) && (phyrev < 2)) {
   16645 			PHY_RESET(child);
   16646 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16647 			if (rv != 0)
   16648 				return rv;
   16649 		}
   16650 	}
   16651 
   16652 	/* Select page 0 */
   16653 	if ((rv = sc->phy.acquire(sc)) != 0)
   16654 		return rv;
   16655 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16656 	sc->phy.release(sc);
   16657 	if (rv != 0)
   16658 		return rv;
   16659 
   16660 	/*
   16661 	 * Configure the K1 Si workaround during phy reset assuming there is
   16662 	 * link so that it disables K1 if link is in 1Gbps.
   16663 	 */
   16664 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16665 		return rv;
   16666 
   16667 	/* Workaround for link disconnects on a busy hub in half duplex */
   16668 	rv = sc->phy.acquire(sc);
   16669 	if (rv)
   16670 		return rv;
   16671 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16672 	if (rv)
   16673 		goto release;
   16674 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16675 	    phy_data & 0x00ff);
   16676 	if (rv)
   16677 		goto release;
   16678 
   16679 	/* Set MSE higher to enable link to stay up when noise is high */
   16680 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16681 release:
   16682 	sc->phy.release(sc);
   16683 
   16684 	return rv;
   16685 }
   16686 
   16687 /*
   16688  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16689  *  @sc:   pointer to the HW structure
   16690  */
   16691 static void
   16692 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16693 {
   16694 
   16695 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16696 		device_xname(sc->sc_dev), __func__));
   16697 
   16698 	if (sc->phy.acquire(sc) != 0)
   16699 		return;
   16700 
   16701 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16702 
   16703 	sc->phy.release(sc);
   16704 }
   16705 
   16706 static void
   16707 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16708 {
   16709 	device_t dev = sc->sc_dev;
   16710 	uint32_t mac_reg;
   16711 	uint16_t i, wuce;
   16712 	int count;
   16713 
   16714 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16715 		device_xname(dev), __func__));
   16716 
   16717 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16718 		return;
   16719 
   16720 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16721 	count = wm_rar_count(sc);
   16722 	for (i = 0; i < count; i++) {
   16723 		uint16_t lo, hi;
   16724 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16725 		lo = (uint16_t)(mac_reg & 0xffff);
   16726 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16727 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16728 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16729 
   16730 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16731 		lo = (uint16_t)(mac_reg & 0xffff);
   16732 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16733 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16734 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16735 	}
   16736 
   16737 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16738 }
   16739 
   16740 /*
   16741  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16742  *  with 82579 PHY
 *  @sc: pointer to the HW structure
 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16744  */
   16745 static int
   16746 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16747 {
   16748 	device_t dev = sc->sc_dev;
   16749 	int rar_count;
   16750 	int rv;
   16751 	uint32_t mac_reg;
   16752 	uint16_t dft_ctrl, data;
   16753 	uint16_t i;
   16754 
   16755 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16756 		device_xname(dev), __func__));
   16757 
   16758 	if (sc->sc_type < WM_T_PCH2)
   16759 		return 0;
   16760 
   16761 	/* Acquire PHY semaphore */
   16762 	rv = sc->phy.acquire(sc);
   16763 	if (rv != 0)
   16764 		return rv;
   16765 
   16766 	/* Disable Rx path while enabling/disabling workaround */
   16767 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16768 	if (rv != 0)
   16769 		goto out;
   16770 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16771 	    dft_ctrl | (1 << 14));
   16772 	if (rv != 0)
   16773 		goto out;
   16774 
   16775 	if (enable) {
   16776 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16777 		 * SHRAL/H) and initial CRC values to the MAC
   16778 		 */
   16779 		rar_count = wm_rar_count(sc);
   16780 		for (i = 0; i < rar_count; i++) {
   16781 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16782 			uint32_t addr_high, addr_low;
   16783 
   16784 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16785 			if (!(addr_high & RAL_AV))
   16786 				continue;
   16787 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16788 			mac_addr[0] = (addr_low & 0xFF);
   16789 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16790 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16791 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16792 			mac_addr[4] = (addr_high & 0xFF);
   16793 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16794 
   16795 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16796 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16797 		}
   16798 
   16799 		/* Write Rx addresses to the PHY */
   16800 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16801 	}
   16802 
   16803 	/*
   16804 	 * If enable ==
   16805 	 *	true: Enable jumbo frame workaround in the MAC.
   16806 	 *	false: Write MAC register values back to h/w defaults.
   16807 	 */
   16808 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16809 	if (enable) {
   16810 		mac_reg &= ~(1 << 14);
   16811 		mac_reg |= (7 << 15);
   16812 	} else
   16813 		mac_reg &= ~(0xf << 14);
   16814 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16815 
   16816 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16817 	if (enable) {
   16818 		mac_reg |= RCTL_SECRC;
   16819 		sc->sc_rctl |= RCTL_SECRC;
   16820 		sc->sc_flags |= WM_F_CRC_STRIP;
   16821 	} else {
   16822 		mac_reg &= ~RCTL_SECRC;
   16823 		sc->sc_rctl &= ~RCTL_SECRC;
   16824 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16825 	}
   16826 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16827 
   16828 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16829 	if (rv != 0)
   16830 		goto out;
   16831 	if (enable)
   16832 		data |= 1 << 0;
   16833 	else
   16834 		data &= ~(1 << 0);
   16835 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16836 	if (rv != 0)
   16837 		goto out;
   16838 
   16839 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16840 	if (rv != 0)
   16841 		goto out;
   16842 	/*
	 * XXX FreeBSD and Linux set the same value in both the enable and
	 * the disable case. Is that correct?
   16845 	 */
   16846 	data &= ~(0xf << 8);
   16847 	data |= (0xb << 8);
   16848 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16849 	if (rv != 0)
   16850 		goto out;
   16851 
   16852 	/*
   16853 	 * If enable ==
   16854 	 *	true: Enable jumbo frame workaround in the PHY.
   16855 	 *	false: Write PHY register values back to h/w defaults.
   16856 	 */
   16857 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16858 	if (rv != 0)
   16859 		goto out;
   16860 	data &= ~(0x7F << 5);
   16861 	if (enable)
   16862 		data |= (0x37 << 5);
   16863 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16864 	if (rv != 0)
   16865 		goto out;
   16866 
   16867 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16868 	if (rv != 0)
   16869 		goto out;
   16870 	if (enable)
   16871 		data &= ~(1 << 13);
   16872 	else
   16873 		data |= (1 << 13);
   16874 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16875 	if (rv != 0)
   16876 		goto out;
   16877 
   16878 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16879 	if (rv != 0)
   16880 		goto out;
   16881 	data &= ~(0x3FF << 2);
   16882 	if (enable)
   16883 		data |= (I82579_TX_PTR_GAP << 2);
   16884 	else
   16885 		data |= (0x8 << 2);
   16886 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16887 	if (rv != 0)
   16888 		goto out;
   16889 
   16890 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16891 	    enable ? 0xf100 : 0x7e00);
   16892 	if (rv != 0)
   16893 		goto out;
   16894 
   16895 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16896 	if (rv != 0)
   16897 		goto out;
   16898 	if (enable)
   16899 		data |= 1 << 10;
   16900 	else
   16901 		data &= ~(1 << 10);
   16902 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16903 	if (rv != 0)
   16904 		goto out;
   16905 
   16906 	/* Re-enable Rx path after enabling/disabling workaround */
   16907 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16908 	    dft_ctrl & ~(1 << 14));
   16909 
   16910 out:
   16911 	sc->phy.release(sc);
   16912 
   16913 	return rv;
   16914 }
   16915 
   16916 /*
   16917  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16918  *  done after every PHY reset.
   16919  */
   16920 static int
   16921 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16922 {
   16923 	device_t dev = sc->sc_dev;
   16924 	int rv;
   16925 
   16926 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16927 		device_xname(dev), __func__));
   16928 	KASSERT(sc->sc_type == WM_T_PCH2);
   16929 
   16930 	/* Set MDIO slow mode before any other MDIO access */
   16931 	rv = wm_set_mdio_slow_mode_hv(sc);
   16932 	if (rv != 0)
   16933 		return rv;
   16934 
   16935 	rv = sc->phy.acquire(sc);
   16936 	if (rv != 0)
   16937 		return rv;
   16938 	/* Set MSE higher to enable link to stay up when noise is high */
   16939 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16940 	if (rv != 0)
   16941 		goto release;
   16942 	/* Drop link after 5 times MSE threshold was reached */
   16943 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16944 release:
   16945 	sc->phy.release(sc);
   16946 
   16947 	return rv;
   16948 }
   16949 
   16950 /**
   16951  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @sc:   pointer to the HW structure
 *  @link: link up bool flag
   16953  *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
 *  indications, preventing further DMA write requests.  Work around the
 *  issue by disabling the de-assertion of the clock request when in
 *  1Gbps mode.
   16957  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16958  *  speeds in order to avoid Tx hangs.
   16959  **/
   16960 static int
   16961 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16962 {
   16963 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16964 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16965 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16966 	uint16_t phyreg;
   16967 
   16968 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc,
		    KUMCTRLSTA_OFFSET_K1_CONFIG, &phyreg);
   16972 		if (rv != 0)
   16973 			goto release;
   16974 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16975 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16976 		if (rv != 0)
   16977 			goto release;
   16978 		delay(20);
   16979 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16980 
   16981 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16982 		    &phyreg);
   16983 release:
   16984 		sc->phy.release(sc);
   16985 		return rv;
   16986 	}
   16987 
   16988 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16989 
   16990 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16991 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16992 	    || !link
   16993 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16994 		goto update_fextnvm6;
   16995 
   16996 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16997 
   16998 	/* Clear link status transmit timeout */
   16999 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   17000 	if (speed == STATUS_SPEED_100) {
   17001 		/* Set inband Tx timeout to 5x10us for 100Half */
   17002 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17003 
   17004 		/* Do not extend the K1 entry latency for 100Half */
   17005 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17006 	} else {
   17007 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   17008 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   17009 
   17010 		/* Extend the K1 entry latency for 10 Mbps */
   17011 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   17012 	}
   17013 
   17014 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   17015 
   17016 update_fextnvm6:
   17017 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   17018 	return 0;
   17019 }
   17020 
   17021 /*
   17022  *  wm_k1_gig_workaround_hv - K1 Si workaround
   17023  *  @sc:   pointer to the HW structure
   17024  *  @link: link up bool flag
   17025  *
   17026  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   17028  *  If link is down, the function will restore the default K1 setting located
   17029  *  in the NVM.
   17030  */
   17031 static int
   17032 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   17033 {
   17034 	int k1_enable = sc->sc_nvm_k1_enabled;
   17035 	int rv;
   17036 
   17037 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17038 		device_xname(sc->sc_dev), __func__));
   17039 
   17040 	rv = sc->phy.acquire(sc);
   17041 	if (rv != 0)
   17042 		return rv;
   17043 
   17044 	if (link) {
   17045 		k1_enable = 0;
   17046 
   17047 		/* Link stall fix for link up */
   17048 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17049 		    0x0100);
   17050 	} else {
   17051 		/* Link stall fix for link down */
   17052 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   17053 		    0x4100);
   17054 	}
   17055 
   17056 	wm_configure_k1_ich8lan(sc, k1_enable);
   17057 	sc->phy.release(sc);
   17058 
   17059 	return 0;
   17060 }
   17061 
   17062 /*
   17063  *  wm_k1_workaround_lv - K1 Si workaround
   17064  *  @sc:   pointer to the HW structure
   17065  *
   17066  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   17067  *  Disable K1 for 1000 and 100 speeds
   17068  */
   17069 static int
   17070 wm_k1_workaround_lv(struct wm_softc *sc)
   17071 {
   17072 	uint32_t reg;
   17073 	uint16_t phyreg;
   17074 	int rv;
   17075 
   17076 	if (sc->sc_type != WM_T_PCH2)
   17077 		return 0;
   17078 
   17079 	/* Set K1 beacon duration based on 10Mbps speed */
   17080 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   17081 	if (rv != 0)
   17082 		return rv;
   17083 
   17084 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   17085 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   17086 		if (phyreg &
   17087 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   17089 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   17090 			    &phyreg);
   17091 			if (rv != 0)
   17092 				return rv;
   17093 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   17094 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   17095 			    phyreg);
   17096 			if (rv != 0)
   17097 				return rv;
   17098 		} else {
   17099 			/* For 10Mbps */
   17100 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   17101 			reg &= ~FEXTNVM4_BEACON_DURATION;
   17102 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   17103 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   17104 		}
   17105 	}
   17106 
   17107 	return 0;
   17108 }
   17109 
   17110 /*
   17111  *  wm_link_stall_workaround_hv - Si workaround
   17112  *  @sc: pointer to the HW structure
   17113  *
   17114  *  This function works around a Si bug where the link partner can get
   17115  *  a link up indication before the PHY does. If small packets are sent
   17116  *  by the link partner they can be placed in the packet buffer without
   17117  *  being properly accounted for by the PHY and will stall preventing
   17118  *  further packets from being received.  The workaround is to clear the
   17119  *  packet buffer after the PHY detects link up.
   17120  */
   17121 static int
   17122 wm_link_stall_workaround_hv(struct wm_softc *sc)
   17123 {
   17124 	uint16_t phyreg;
   17125 
   17126 	if (sc->sc_phytype != WMPHY_82578)
   17127 		return 0;
   17128 
	/* Do not apply the workaround if the PHY is in loopback (bit 14 set) */
   17130 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   17131 	if ((phyreg & BMCR_LOOP) != 0)
   17132 		return 0;
   17133 
   17134 	/* Check if link is up and at 1Gbps */
   17135 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   17136 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17137 	    | BM_CS_STATUS_SPEED_MASK;
   17138 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   17139 		| BM_CS_STATUS_SPEED_1000))
   17140 		return 0;
   17141 
   17142 	delay(200 * 1000);	/* XXX too big */
   17143 
   17144 	/* Flush the packets in the fifo buffer */
   17145 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17146 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   17147 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   17148 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   17149 
   17150 	return 0;
   17151 }
   17152 
   17153 static int
   17154 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   17155 {
   17156 	int rv;
   17157 
   17158 	rv = sc->phy.acquire(sc);
   17159 	if (rv != 0) {
   17160 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   17161 		    __func__);
   17162 		return rv;
   17163 	}
   17164 
   17165 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
   17166 
   17167 	sc->phy.release(sc);
   17168 
   17169 	return rv;
   17170 }
   17171 
   17172 static int
   17173 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
   17174 {
   17175 	int rv;
   17176 	uint16_t reg;
   17177 
   17178 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   17179 	if (rv != 0)
   17180 		return rv;
   17181 
   17182 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   17183 	    reg | HV_KMRN_MDIO_SLOW);
   17184 }
   17185 
   17186 /*
   17187  *  wm_configure_k1_ich8lan - Configure K1 power state
   17188  *  @sc: pointer to the HW structure
   17189  *  @enable: K1 state to configure
   17190  *
   17191  *  Configure the K1 power state based on the provided parameter.
   17192  *  Assumes semaphore already acquired.
   17193  */
   17194 static void
   17195 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   17196 {
   17197 	uint32_t ctrl, ctrl_ext, tmp;
   17198 	uint16_t kmreg;
   17199 	int rv;
   17200 
   17201 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17202 
   17203 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   17204 	if (rv != 0)
   17205 		return;
   17206 
   17207 	if (k1_enable)
   17208 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   17209 	else
   17210 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   17211 
   17212 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   17213 	if (rv != 0)
   17214 		return;
   17215 
   17216 	delay(20);
   17217 
   17218 	ctrl = CSR_READ(sc, WMREG_CTRL);
   17219 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
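	/*
	 * Briefly force the MAC speed (CTRL_FRCSPD with the speed bits
	 * cleared, bypassing the internal speed indication via SPD_BYPS),
	 * apparently so the new K1 setting takes effect, then restore the
	 * original CTRL and CTRL_EXT values.
	 */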
   17220 
   17221 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   17222 	tmp |= CTRL_FRCSPD;
   17223 
   17224 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   17225 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   17226 	CSR_WRITE_FLUSH(sc);
   17227 	delay(20);
   17228 
   17229 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   17230 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   17231 	CSR_WRITE_FLUSH(sc);
   17232 	delay(20);
   17233 
   17234 	return;
   17235 }
   17236 
   17237 /* special case - for 82575 - need to do manual init ... */
   17238 static void
   17239 wm_reset_init_script_82575(struct wm_softc *sc)
   17240 {
   17241 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   17244 	 */
   17245 
   17246 	/* SerDes configuration via SERDESCTRL */
   17247 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   17248 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   17249 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   17250 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   17251 
   17252 	/* CCM configuration via CCMCTL register */
   17253 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   17254 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   17255 
   17256 	/* PCIe lanes configuration */
   17257 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   17258 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   17259 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   17260 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   17261 
   17262 	/* PCIe PLL Configuration */
   17263 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   17264 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   17265 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   17266 }
   17267 
   17268 static void
   17269 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   17270 {
   17271 	uint32_t reg;
   17272 	uint16_t nvmword;
   17273 	int rv;
   17274 
   17275 	if (sc->sc_type != WM_T_82580)
   17276 		return;
   17277 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   17278 		return;
   17279 
   17280 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   17281 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   17282 	if (rv != 0) {
   17283 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   17284 		    __func__);
   17285 		return;
   17286 	}
   17287 
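	/*
	 * Mirror the NVM's external/common MDIO port configuration into
	 * MDICNFG.
	 */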
   17288 	reg = CSR_READ(sc, WMREG_MDICNFG);
   17289 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   17290 		reg |= MDICNFG_DEST;
   17291 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   17292 		reg |= MDICNFG_COM_MDIO;
   17293 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17294 }
   17295 
   17296 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   17297 
   17298 static bool
   17299 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   17300 {
   17301 	uint32_t reg;
   17302 	uint16_t id1, id2;
   17303 	int i, rv;
   17304 
   17305 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17306 		device_xname(sc->sc_dev), __func__));
   17307 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   17308 
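	/*
	 * Try reading the PHY ID registers up to twice; an access may fail
	 * or return an invalid (all-zeros or all-ones) ID on the first
	 * attempt.
	 */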
   17309 	id1 = id2 = 0xffff;
   17310 	for (i = 0; i < 2; i++) {
   17311 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17312 		    &id1);
   17313 		if ((rv != 0) || MII_INVALIDID(id1))
   17314 			continue;
   17315 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17316 		    &id2);
   17317 		if ((rv != 0) || MII_INVALIDID(id2))
   17318 			continue;
   17319 		break;
   17320 	}
   17321 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   17322 		goto out;
   17323 
   17324 	/*
   17325 	 * In case the PHY needs to be in mdio slow mode,
   17326 	 * set slow mode and try to get the PHY id again.
   17327 	 */
   17328 	rv = 0;
   17329 	if (sc->sc_type < WM_T_PCH_LPT) {
   17330 		wm_set_mdio_slow_mode_hv_locked(sc);
   17331 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   17332 		    &id1);
   17333 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   17334 		    &id2);
   17335 	}
   17336 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   17337 		device_printf(sc->sc_dev, "XXX return with false\n");
   17338 		return false;
   17339 	}
   17340 out:
   17341 	if (sc->sc_type >= WM_T_PCH_LPT) {
   17342 		/* Only unforce SMBus if ME is not active */
   17343 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   17344 			uint16_t phyreg;
   17345 
   17346 			/* Unforce SMBus mode in PHY */
   17347 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   17348 			    CV_SMB_CTRL, &phyreg);
   17349 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   17350 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   17351 			    CV_SMB_CTRL, phyreg);
   17352 
   17353 			/* Unforce SMBus mode in MAC */
   17354 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17355 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   17356 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17357 		}
   17358 	}
   17359 	return true;
   17360 }
   17361 
   17362 static void
   17363 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   17364 {
   17365 	uint32_t reg;
   17366 	int i;
   17367 
   17368 	/* Set PHY Config Counter to 50msec */
   17369 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   17370 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   17371 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   17372 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   17373 
   17374 	/* Toggle LANPHYPC */
   17375 	reg = CSR_READ(sc, WMREG_CTRL);
   17376 	reg |= CTRL_LANPHYPC_OVERRIDE;
   17377 	reg &= ~CTRL_LANPHYPC_VALUE;
   17378 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17379 	CSR_WRITE_FLUSH(sc);
   17380 	delay(1000);
   17381 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   17382 	CSR_WRITE(sc, WMREG_CTRL, reg);
   17383 	CSR_WRITE_FLUSH(sc);
   17384 
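	/*
	 * Wait for the PHY power cycle to complete: a fixed 50ms on pre-LPT
	 * parts; otherwise poll the LANPHYPC done (LPCD) bit for up to
	 * 100ms and then settle for another 30ms.
	 */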
   17385 	if (sc->sc_type < WM_T_PCH_LPT)
   17386 		delay(50 * 1000);
   17387 	else {
   17388 		i = 20;
   17389 
   17390 		do {
   17391 			delay(5 * 1000);
   17392 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   17393 		    && i--);
   17394 
   17395 		delay(30 * 1000);
   17396 	}
   17397 }
   17398 
   17399 static int
   17400 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   17401 {
   17402 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   17403 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   17404 	uint32_t rxa;
   17405 	uint16_t scale = 0, lat_enc = 0;
   17406 	int32_t obff_hwm = 0;
   17407 	int64_t lat_ns, value;
   17408 
   17409 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17410 		device_xname(sc->sc_dev), __func__));
   17411 
   17412 	if (link) {
   17413 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   17414 		uint32_t status;
   17415 		uint16_t speed;
   17416 		pcireg_t preg;
   17417 
   17418 		status = CSR_READ(sc, WMREG_STATUS);
   17419 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   17420 		case STATUS_SPEED_10:
   17421 			speed = 10;
   17422 			break;
   17423 		case STATUS_SPEED_100:
   17424 			speed = 100;
   17425 			break;
   17426 		case STATUS_SPEED_1000:
   17427 			speed = 1000;
   17428 			break;
   17429 		default:
   17430 			device_printf(sc->sc_dev, "Unknown speed "
   17431 			    "(status = %08x)\n", status);
   17432 			return -1;
   17433 		}
   17434 
   17435 		/* Rx Packet Buffer Allocation size (KB) */
   17436 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   17437 
   17438 		/*
   17439 		 * Determine the maximum latency tolerated by the device.
   17440 		 *
   17441 		 * Per the PCIe spec, the tolerated latencies are encoded as
   17442 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17443 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17444 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17445 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17446 		 */
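		/*
		 * The tolerable latency is roughly the time it takes the Rx
		 * packet buffer (rxa KB), less two maximum-sized frames, to
		 * fill at the current link speed: bytes * 8 bits * 1000,
		 * divided by the speed in Mb/s, yields nanoseconds.
		 */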
   17447 		lat_ns = ((int64_t)rxa * 1024 -
   17448 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17449 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17450 		if (lat_ns < 0)
   17451 			lat_ns = 0;
   17452 		else
   17453 			lat_ns /= speed;
   17454 		value = lat_ns;
   17455 
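		/*
		 * Scale the value down by 2^5 per scale step until it fits
		 * in the 10-bit LTR value field.
		 */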
   17456 		while (value > LTRV_VALUE) {
			scale++;
   17458 			value = howmany(value, __BIT(5));
   17459 		}
   17460 		if (scale > LTRV_SCALE_MAX) {
   17461 			device_printf(sc->sc_dev,
   17462 			    "Invalid LTR latency scale %d\n", scale);
   17463 			return -1;
   17464 		}
   17465 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17466 
   17467 		/* Determine the maximum latency tolerated by the platform */
   17468 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17469 		    WM_PCI_LTR_CAP_LPT);
   17470 		max_snoop = preg & 0xffff;
   17471 		max_nosnoop = preg >> 16;
   17472 
   17473 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17474 
   17475 		if (lat_enc > max_ltr_enc) {
   17476 			lat_enc = max_ltr_enc;
   17477 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17478 			    * PCI_LTR_SCALETONS(
   17479 				    __SHIFTOUT(lat_enc,
   17480 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17481 		}
   17482 
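		/*
		 * Convert the tolerated latency back into the amount of Rx
		 * buffer (in KB) consumed at line rate during that time;
		 * the OBFF high water mark is the buffer size minus that.
		 */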
   17483 		if (lat_ns) {
   17484 			lat_ns *= speed * 1000;
   17485 			lat_ns /= 8;
   17486 			lat_ns /= 1000000000;
   17487 			obff_hwm = (int32_t)(rxa - lat_ns);
   17488 		}
   17489 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   17490 			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
   17492 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17493 			return -1;
   17494 		}
   17495 	}
   17496 	/* Snoop and No-Snoop latencies the same */
   17497 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17498 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17499 
   17500 	/* Set OBFF high water mark */
   17501 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17502 	reg |= obff_hwm;
   17503 	CSR_WRITE(sc, WMREG_SVT, reg);
   17504 
   17505 	/* Enable OBFF */
   17506 	reg = CSR_READ(sc, WMREG_SVCR);
   17507 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17508 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17509 
   17510 	return 0;
   17511 }
   17512 
   17513 /*
   17514  * I210 Errata 25 and I211 Errata 10
   17515  * Slow System Clock.
   17516  *
 * Note that this function is called in both the FLASH and iNVM cases
 * on NetBSD.
   17518  */
   17519 static int
   17520 wm_pll_workaround_i210(struct wm_softc *sc)
   17521 {
   17522 	uint32_t mdicnfg, wuc;
   17523 	uint32_t reg;
   17524 	pcireg_t pcireg;
   17525 	uint32_t pmreg;
   17526 	uint16_t nvmword, tmp_nvmword;
   17527 	uint16_t phyval;
   17528 	bool wa_done = false;
   17529 	int i, rv = 0;
   17530 
   17531 	/* Get Power Management cap offset */
   17532 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17533 	    &pmreg, NULL) == 0)
   17534 		return -1;
   17535 
   17536 	/* Save WUC and MDICNFG registers */
   17537 	wuc = CSR_READ(sc, WMREG_WUC);
   17538 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17539 
   17540 	reg = mdicnfg & ~MDICNFG_DEST;
   17541 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17542 
   17543 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17544 		/*
   17545 		 * The default value of the Initialization Control Word 1
   17546 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   17547 		 */
   17548 		nvmword = INVM_DEFAULT_AL;
   17549 	}
   17550 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17551 
   17552 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17553 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17554 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17555 
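		/*
		 * Done if the PLL frequency field no longer reads back as
		 * unconfigured.
		 */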
   17556 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17557 			rv = 0;
   17558 			break; /* OK */
   17559 		} else
   17560 			rv = -1;
   17561 
   17562 		wa_done = true;
   17563 		/* Directly reset the internal PHY */
   17564 		reg = CSR_READ(sc, WMREG_CTRL);
   17565 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17566 
   17567 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17568 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17569 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17570 
   17571 		CSR_WRITE(sc, WMREG_WUC, 0);
   17572 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17573 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17574 
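		/*
		 * Bounce the device through D3hot and back to D0 so the
		 * autoload word with the PLL workaround value is re-read.
		 */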
   17575 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17576 		    pmreg + PCI_PMCSR);
   17577 		pcireg |= PCI_PMCSR_STATE_D3;
   17578 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17579 		    pmreg + PCI_PMCSR, pcireg);
   17580 		delay(1000);
   17581 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17582 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17583 		    pmreg + PCI_PMCSR, pcireg);
   17584 
   17585 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17586 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17587 
   17588 		/* Restore WUC register */
   17589 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17590 	}
   17591 
   17592 	/* Restore MDICNFG setting */
   17593 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17594 	if (wa_done)
   17595 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17596 	return rv;
   17597 }
   17598 
   17599 static void
   17600 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17601 {
   17602 	uint32_t reg;
   17603 
   17604 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17605 		device_xname(sc->sc_dev), __func__));
   17606 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17607 	    || (sc->sc_type == WM_T_PCH_CNP));
   17608 
   17609 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17610 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17611 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17612 
   17613 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17614 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17615 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17616 }
   17617 
   17618 /* Sysctl functions */
   17619 static int
   17620 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   17621 {
   17622 	struct sysctlnode node = *rnode;
   17623 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17624 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17625 	struct wm_softc *sc = txq->txq_sc;
   17626 	uint32_t reg;
   17627 
   17628 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   17629 	node.sysctl_data = &reg;
   17630 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17631 }
   17632 
   17633 static int
   17634 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   17635 {
   17636 	struct sysctlnode node = *rnode;
   17637 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17638 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17639 	struct wm_softc *sc = txq->txq_sc;
   17640 	uint32_t reg;
   17641 
   17642 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   17643 	node.sysctl_data = &reg;
   17644 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17645 }
   17646 
   17647 #ifdef WM_DEBUG
   17648 static int
   17649 wm_sysctl_debug(SYSCTLFN_ARGS)
   17650 {
   17651 	struct sysctlnode node = *rnode;
   17652 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17653 	uint32_t dflags;
   17654 	int error;
   17655 
   17656 	dflags = sc->sc_debug;
   17657 	node.sysctl_data = &dflags;
   17658 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17659 
   17660 	if (error || newp == NULL)
   17661 		return error;
   17662 
   17663 	sc->sc_debug = dflags;
   17664 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   17665 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   17666 
   17667 	return 0;
   17668 }
   17669 #endif
   17670