Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.697
      1 /*	$NetBSD: if_wm.c,v 1.697 2020/11/19 02:36:30 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficiency Ethernet) for I354
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  */
     83 
     84 #include <sys/cdefs.h>
     85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.697 2020/11/19 02:36:30 msaitoh Exp $");
     86 
     87 #ifdef _KERNEL_OPT
     88 #include "opt_net_mpsafe.h"
     89 #include "opt_if_wm.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 #include <sys/cpu.h>
    107 #include <sys/pcq.h>
    108 #include <sys/sysctl.h>
    109 #include <sys/workqueue.h>
    110 
    111 #include <sys/rndsource.h>
    112 
    113 #include <net/if.h>
    114 #include <net/if_dl.h>
    115 #include <net/if_media.h>
    116 #include <net/if_ether.h>
    117 
    118 #include <net/bpf.h>
    119 
    120 #include <net/rss_config.h>
    121 
    122 #include <netinet/in.h>			/* XXX for struct ip */
    123 #include <netinet/in_systm.h>		/* XXX for struct ip */
    124 #include <netinet/ip.h>			/* XXX for struct ip */
    125 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    126 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    127 
    128 #include <sys/bus.h>
    129 #include <sys/intr.h>
    130 #include <machine/endian.h>
    131 
    132 #include <dev/mii/mii.h>
    133 #include <dev/mii/mdio.h>
    134 #include <dev/mii/miivar.h>
    135 #include <dev/mii/miidevs.h>
    136 #include <dev/mii/mii_bitbang.h>
    137 #include <dev/mii/ikphyreg.h>
    138 #include <dev/mii/igphyreg.h>
    139 #include <dev/mii/igphyvar.h>
    140 #include <dev/mii/inbmphyreg.h>
    141 #include <dev/mii/ihphyreg.h>
    142 #include <dev/mii/makphyreg.h>
    143 
    144 #include <dev/pci/pcireg.h>
    145 #include <dev/pci/pcivar.h>
    146 #include <dev/pci/pcidevs.h>
    147 
    148 #include <dev/pci/if_wmreg.h>
    149 #include <dev/pci/if_wmvar.h>
    150 
    151 #ifdef WM_DEBUG
    152 #define	WM_DEBUG_LINK		__BIT(0)
    153 #define	WM_DEBUG_TX		__BIT(1)
    154 #define	WM_DEBUG_RX		__BIT(2)
    155 #define	WM_DEBUG_GMII		__BIT(3)
    156 #define	WM_DEBUG_MANAGE		__BIT(4)
    157 #define	WM_DEBUG_NVM		__BIT(5)
    158 #define	WM_DEBUG_INIT		__BIT(6)
    159 #define	WM_DEBUG_LOCK		__BIT(7)
    160 
    161 #if 0
    162 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
    163 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
    164 	WM_DEBUG_LOCK
    165 #endif
    166 
    167 #define	DPRINTF(sc, x, y)			  \
    168 	do {					  \
    169 		if ((sc)->sc_debug & (x))	  \
    170 			printf y;		  \
    171 	} while (0)
    172 #else
    173 #define	DPRINTF(sc, x, y)	__nothing
    174 #endif /* WM_DEBUG */
    175 
    176 #ifdef NET_MPSAFE
    177 #define WM_MPSAFE	1
    178 #define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
    179 #define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
    180 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    181 #else
    182 #define WM_CALLOUT_FLAGS	0
    183 #define WM_SOFTINT_FLAGS	0
    184 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
    185 #endif
    186 
    187 #define WM_WORKQUEUE_PRI PRI_SOFTNET
    188 
    189 /*
    190  * This device driver's max interrupt numbers.
    191  */
    192 #define WM_MAX_NQUEUEINTR	16
    193 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    194 
    195 #ifndef WM_DISABLE_MSI
    196 #define	WM_DISABLE_MSI 0
    197 #endif
    198 #ifndef WM_DISABLE_MSIX
    199 #define	WM_DISABLE_MSIX 0
    200 #endif
    201 
    202 int wm_disable_msi = WM_DISABLE_MSI;
    203 int wm_disable_msix = WM_DISABLE_MSIX;
    204 
    205 #ifndef WM_WATCHDOG_TIMEOUT
    206 #define WM_WATCHDOG_TIMEOUT 5
    207 #endif
    208 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    209 
    210 /*
    211  * Transmit descriptor list size.  Due to errata, we can only have
    212  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    213  * on >= 82544. We tell the upper layers that they can queue a lot
    214  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    215  * of them at a time.
    216  *
    217  * We allow up to 64 DMA segments per packet.  Pathological packet
    218  * chains containing many small mbufs have been observed in zero-copy
    219  * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
    220  * m_defrag() is called to reduce it.
    221  */
    222 #define	WM_NTXSEGS		64
    223 #define	WM_IFQUEUELEN		256
    224 #define	WM_TXQUEUELEN_MAX	64
    225 #define	WM_TXQUEUELEN_MAX_82547	16
    226 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    227 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    228 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    229 #define	WM_NTXDESC_82542	256
    230 #define	WM_NTXDESC_82544	4096
    231 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    232 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    233 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    234 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    235 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    236 
    237 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    238 
    239 #define	WM_TXINTERQSIZE		256
    240 
    241 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    242 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    243 #endif
    244 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    245 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    246 #endif
    247 
    248 /*
    249  * Receive descriptor list size.  We have one Rx buffer for normal
    250  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    251  * packet.  We allocate 256 receive descriptors, each with a 2k
    252  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    253  */
    254 #define	WM_NRXDESC		256U
    255 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    256 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    257 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    258 
    259 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    260 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    261 #endif
    262 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    263 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    264 #endif
    265 
    266 typedef union txdescs {
    267 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    268 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    269 } txdescs_t;
    270 
    271 typedef union rxdescs {
    272 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    273 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    274 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    275 } rxdescs_t;
    276 
    277 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    278 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    279 
    280 /*
    281  * Software state for transmit jobs.
    282  */
    283 struct wm_txsoft {
    284 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    285 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    286 	int txs_firstdesc;		/* first descriptor in packet */
    287 	int txs_lastdesc;		/* last descriptor in packet */
    288 	int txs_ndesc;			/* # of descriptors used */
    289 };
    290 
    291 /*
    292  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    293  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    294  * them together.
    295  */
    296 struct wm_rxsoft {
    297 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    298 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    299 };
    300 
    301 #define WM_LINKUP_TIMEOUT	50
    302 
    303 static uint16_t swfwphysem[] = {
    304 	SWFW_PHY0_SM,
    305 	SWFW_PHY1_SM,
    306 	SWFW_PHY2_SM,
    307 	SWFW_PHY3_SM
    308 };
    309 
    310 static const uint32_t wm_82580_rxpbs_table[] = {
    311 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    312 };
    313 
    314 struct wm_softc;
    315 
    316 #ifdef WM_EVENT_COUNTERS
    317 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    318 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    319 	struct evcnt qname##_ev_##evname;
    320 
    321 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    322 	do {								\
    323 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    324 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    325 		    "%s%02d%s", #qname, (qnum), #evname);		\
    326 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    327 		    (evtype), NULL, (xname),				\
    328 		    (q)->qname##_##evname##_evcnt_name);		\
    329 	} while (0)
    330 
    331 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    332 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    333 
    334 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    335 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    336 
    337 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    338 	evcnt_detach(&(q)->qname##_ev_##evname);
    339 #endif /* WM_EVENT_COUNTERS */
    340 
    341 struct wm_txqueue {
    342 	kmutex_t *txq_lock;		/* lock for tx operations */
    343 
    344 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    345 
    346 	/* Software state for the transmit descriptors. */
    347 	int txq_num;			/* must be a power of two */
    348 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    349 
    350 	/* TX control data structures. */
    351 	int txq_ndesc;			/* must be a power of two */
    352 	size_t txq_descsize;		/* a tx descriptor size */
    353 	txdescs_t *txq_descs_u;
    354 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    355 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    356 	int txq_desc_rseg;		/* real number of control segment */
    357 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    358 #define	txq_descs	txq_descs_u->sctxu_txdescs
    359 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    360 
    361 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    362 
    363 	int txq_free;			/* number of free Tx descriptors */
    364 	int txq_next;			/* next ready Tx descriptor */
    365 
    366 	int txq_sfree;			/* number of free Tx jobs */
    367 	int txq_snext;			/* next free Tx job */
    368 	int txq_sdirty;			/* dirty Tx jobs */
    369 
    370 	/* These 4 variables are used only on the 82547. */
    371 	int txq_fifo_size;		/* Tx FIFO size */
    372 	int txq_fifo_head;		/* current head of FIFO */
    373 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    374 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    375 
    376 	/*
    377 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    378 	 * CPUs. This queue intermediate them without block.
    379 	 */
    380 	pcq_t *txq_interq;
    381 
    382 	/*
    383 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
    384 	 * to manage Tx H/W queue's busy flag.
    385 	 */
    386 	int txq_flags;			/* flags for H/W queue, see below */
    387 #define	WM_TXQ_NO_SPACE		0x1
    388 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
    389 
    390 	bool txq_stopping;
    391 
    392 	bool txq_sending;
    393 	time_t txq_lastsent;
    394 
    395 	/* Checksum flags used for previous packet */
    396 	uint32_t 	txq_last_hw_cmd;
    397 	uint8_t 	txq_last_hw_fields;
    398 	uint16_t	txq_last_hw_ipcs;
    399 	uint16_t	txq_last_hw_tucs;
    400 
    401 	uint32_t txq_packets;		/* for AIM */
    402 	uint32_t txq_bytes;		/* for AIM */
    403 #ifdef WM_EVENT_COUNTERS
    404 	/* TX event counters */
    405 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    406 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    407 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    408 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    409 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    410 					    /* XXX not used? */
    411 
    412 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    413 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    414 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    415 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    416 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    417 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
    418 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    419 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    420 					    /* other than toomanyseg */
    421 
    422 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
    423 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    424 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    425 	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip wring cksum context */
    426 
    427 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    428 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    429 #endif /* WM_EVENT_COUNTERS */
    430 };
    431 
    432 struct wm_rxqueue {
    433 	kmutex_t *rxq_lock;		/* lock for rx operations */
    434 
    435 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    436 
    437 	/* Software state for the receive descriptors. */
    438 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    439 
    440 	/* RX control data structures. */
    441 	int rxq_ndesc;			/* must be a power of two */
    442 	size_t rxq_descsize;		/* a rx descriptor size */
    443 	rxdescs_t *rxq_descs_u;
    444 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    445 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    446 	int rxq_desc_rseg;		/* real number of control segment */
    447 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    448 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    449 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    450 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    451 
    452 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    453 
    454 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    455 	int rxq_discard;
    456 	int rxq_len;
    457 	struct mbuf *rxq_head;
    458 	struct mbuf *rxq_tail;
    459 	struct mbuf **rxq_tailp;
    460 
    461 	bool rxq_stopping;
    462 
    463 	uint32_t rxq_packets;		/* for AIM */
    464 	uint32_t rxq_bytes;		/* for AIM */
    465 #ifdef WM_EVENT_COUNTERS
    466 	/* RX event counters */
    467 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    468 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    469 
    470 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    471 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    472 #endif
    473 };
    474 
    475 struct wm_queue {
    476 	int wmq_id;			/* index of TX/RX queues */
    477 	int wmq_intr_idx;		/* index of MSI-X tables */
    478 
    479 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    480 	bool wmq_set_itr;
    481 
    482 	struct wm_txqueue wmq_txq;
    483 	struct wm_rxqueue wmq_rxq;
    484 	char sysctlname[32];		/* Name for sysctl */
    485 
    486 	bool wmq_txrx_use_workqueue;
    487 	struct work wmq_cookie;
    488 	void *wmq_si;
    489 };
    490 
    491 struct wm_phyop {
    492 	int (*acquire)(struct wm_softc *);
    493 	void (*release)(struct wm_softc *);
    494 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    495 	int (*writereg_locked)(device_t, int, int, uint16_t);
    496 	int reset_delay_us;
    497 	bool no_errprint;
    498 };
    499 
    500 struct wm_nvmop {
    501 	int (*acquire)(struct wm_softc *);
    502 	void (*release)(struct wm_softc *);
    503 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    504 };
    505 
    506 /*
    507  * Software state per device.
    508  */
    509 struct wm_softc {
    510 	device_t sc_dev;		/* generic device information */
    511 	bus_space_tag_t sc_st;		/* bus space tag */
    512 	bus_space_handle_t sc_sh;	/* bus space handle */
    513 	bus_size_t sc_ss;		/* bus space size */
    514 	bus_space_tag_t sc_iot;		/* I/O space tag */
    515 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    516 	bus_size_t sc_ios;		/* I/O space size */
    517 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    518 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    519 	bus_size_t sc_flashs;		/* flash registers space size */
    520 	off_t sc_flashreg_offset;	/*
    521 					 * offset to flash registers from
    522 					 * start of BAR
    523 					 */
    524 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    525 
    526 	struct ethercom sc_ethercom;	/* ethernet common data */
    527 	struct mii_data sc_mii;		/* MII/media information */
    528 
    529 	pci_chipset_tag_t sc_pc;
    530 	pcitag_t sc_pcitag;
    531 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    532 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    533 
    534 	uint16_t sc_pcidevid;		/* PCI device ID */
    535 	wm_chip_type sc_type;		/* MAC type */
    536 	int sc_rev;			/* MAC revision */
    537 	wm_phy_type sc_phytype;		/* PHY type */
    538 	uint8_t sc_sfptype;		/* SFP type */
    539 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    540 #define	WM_MEDIATYPE_UNKNOWN		0x00
    541 #define	WM_MEDIATYPE_FIBER		0x01
    542 #define	WM_MEDIATYPE_COPPER		0x02
    543 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    544 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    545 	int sc_flags;			/* flags; see below */
    546 	u_short sc_if_flags;		/* last if_flags */
    547 	int sc_ec_capenable;		/* last ec_capenable */
    548 	int sc_flowflags;		/* 802.3x flow control flags */
    549 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
    550 	int sc_align_tweak;
    551 
    552 	void *sc_ihs[WM_MAX_NINTR];	/*
    553 					 * interrupt cookie.
    554 					 * - legacy and msi use sc_ihs[0] only
    555 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    556 					 */
    557 	pci_intr_handle_t *sc_intrs;	/*
    558 					 * legacy and msi use sc_intrs[0] only
    559 					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
    560 					 */
    561 	int sc_nintrs;			/* number of interrupts */
    562 
    563 	int sc_link_intr_idx;		/* index of MSI-X tables */
    564 
    565 	callout_t sc_tick_ch;		/* tick callout */
    566 	bool sc_core_stopping;
    567 
    568 	int sc_nvm_ver_major;
    569 	int sc_nvm_ver_minor;
    570 	int sc_nvm_ver_build;
    571 	int sc_nvm_addrbits;		/* NVM address bits */
    572 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    573 	int sc_ich8_flash_base;
    574 	int sc_ich8_flash_bank_size;
    575 	int sc_nvm_k1_enabled;
    576 
    577 	int sc_nqueues;
    578 	struct wm_queue *sc_queue;
    579 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
    580 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
    581 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
    582 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
    583 	struct workqueue *sc_queue_wq;
    584 	bool sc_txrx_use_workqueue;
    585 
    586 	int sc_affinity_offset;
    587 
    588 #ifdef WM_EVENT_COUNTERS
    589 	/* Event counters. */
    590 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    591 
    592 	/* WM_T_82542_2_1 only */
    593 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    594 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    595 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    596 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    597 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    598 #endif /* WM_EVENT_COUNTERS */
    599 
    600 	struct sysctllog *sc_sysctllog;
    601 
    602 	/* This variable are used only on the 82547. */
    603 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    604 
    605 	uint32_t sc_ctrl;		/* prototype CTRL register */
    606 #if 0
    607 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    608 #endif
    609 	uint32_t sc_icr;		/* prototype interrupt bits */
    610 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    611 	uint32_t sc_tctl;		/* prototype TCTL register */
    612 	uint32_t sc_rctl;		/* prototype RCTL register */
    613 	uint32_t sc_txcw;		/* prototype TXCW register */
    614 	uint32_t sc_tipg;		/* prototype TIPG register */
    615 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    616 	uint32_t sc_pba;		/* prototype PBA register */
    617 
    618 	int sc_tbi_linkup;		/* TBI link status */
    619 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    620 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    621 
    622 	int sc_mchash_type;		/* multicast filter offset */
    623 
    624 	krndsource_t rnd_source;	/* random source */
    625 
    626 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    627 
    628 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    629 	kmutex_t *sc_ich_phymtx;	/*
    630 					 * 82574/82583/ICH/PCH specific PHY
    631 					 * mutex. For 82574/82583, the mutex
    632 					 * is used for both PHY and NVM.
    633 					 */
    634 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    635 
    636 	struct wm_phyop phy;
    637 	struct wm_nvmop nvm;
    638 #ifdef WM_DEBUG
    639 	uint32_t sc_debug;
    640 #endif
    641 };
    642 
    643 #define WM_CORE_LOCK(_sc)						\
    644 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    645 #define WM_CORE_UNLOCK(_sc)						\
    646 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    647 #define WM_CORE_LOCKED(_sc)						\
    648 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    649 
    650 #define	WM_RXCHAIN_RESET(rxq)						\
    651 do {									\
    652 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    653 	*(rxq)->rxq_tailp = NULL;					\
    654 	(rxq)->rxq_len = 0;						\
    655 } while (/*CONSTCOND*/0)
    656 
    657 #define	WM_RXCHAIN_LINK(rxq, m)						\
    658 do {									\
    659 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    660 	(rxq)->rxq_tailp = &(m)->m_next;				\
    661 } while (/*CONSTCOND*/0)
    662 
    663 #ifdef WM_EVENT_COUNTERS
    664 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    665 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    666 
    667 #define WM_Q_EVCNT_INCR(qname, evname)			\
    668 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    669 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    670 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    671 #else /* !WM_EVENT_COUNTERS */
    672 #define	WM_EVCNT_INCR(ev)	/* nothing */
    673 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    674 
    675 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    676 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    677 #endif /* !WM_EVENT_COUNTERS */
    678 
    679 #define	CSR_READ(sc, reg)						\
    680 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    681 #define	CSR_WRITE(sc, reg, val)						\
    682 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    683 #define	CSR_WRITE_FLUSH(sc)						\
    684 	(void)CSR_READ((sc), WMREG_STATUS)
    685 
    686 #define ICH8_FLASH_READ32(sc, reg)					\
    687 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    688 	    (reg) + sc->sc_flashreg_offset)
    689 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    690 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    691 	    (reg) + sc->sc_flashreg_offset, (data))
    692 
    693 #define ICH8_FLASH_READ16(sc, reg)					\
    694 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    695 	    (reg) + sc->sc_flashreg_offset)
    696 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    697 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    698 	    (reg) + sc->sc_flashreg_offset, (data))
    699 
    700 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    701 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    702 
    703 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    704 #define	WM_CDTXADDR_HI(txq, x)						\
    705 	(sizeof(bus_addr_t) == 8 ?					\
    706 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    707 
    708 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    709 #define	WM_CDRXADDR_HI(rxq, x)						\
    710 	(sizeof(bus_addr_t) == 8 ?					\
    711 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    712 
    713 /*
    714  * Register read/write functions.
    715  * Other than CSR_{READ|WRITE}().
    716  */
    717 #if 0
    718 static inline uint32_t wm_io_read(struct wm_softc *, int);
    719 #endif
    720 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    721 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    722     uint32_t, uint32_t);
    723 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    724 
    725 /*
    726  * Descriptor sync/init functions.
    727  */
    728 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    729 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    730 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    731 
    732 /*
    733  * Device driver interface functions and commonly used functions.
    734  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    735  */
    736 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    737 static int	wm_match(device_t, cfdata_t, void *);
    738 static void	wm_attach(device_t, device_t, void *);
    739 static int	wm_detach(device_t, int);
    740 static bool	wm_suspend(device_t, const pmf_qual_t *);
    741 static bool	wm_resume(device_t, const pmf_qual_t *);
    742 static void	wm_watchdog(struct ifnet *);
    743 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    744     uint16_t *);
    745 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    746     uint16_t *);
    747 static void	wm_tick(void *);
    748 static int	wm_ifflags_cb(struct ethercom *);
    749 static int	wm_ioctl(struct ifnet *, u_long, void *);
    750 /* MAC address related */
    751 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    752 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    753 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    754 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    755 static int	wm_rar_count(struct wm_softc *);
    756 static void	wm_set_filter(struct wm_softc *);
    757 /* Reset and init related */
    758 static void	wm_set_vlan(struct wm_softc *);
    759 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    760 static void	wm_get_auto_rd_done(struct wm_softc *);
    761 static void	wm_lan_init_done(struct wm_softc *);
    762 static void	wm_get_cfg_done(struct wm_softc *);
    763 static int	wm_phy_post_reset(struct wm_softc *);
    764 static int	wm_write_smbus_addr(struct wm_softc *);
    765 static int	wm_init_lcd_from_nvm(struct wm_softc *);
    766 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    767 static void	wm_initialize_hardware_bits(struct wm_softc *);
    768 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    769 static int	wm_reset_phy(struct wm_softc *);
    770 static void	wm_flush_desc_rings(struct wm_softc *);
    771 static void	wm_reset(struct wm_softc *);
    772 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    773 static void	wm_rxdrain(struct wm_rxqueue *);
    774 static void	wm_init_rss(struct wm_softc *);
    775 static void	wm_adjust_qnum(struct wm_softc *, int);
    776 static inline bool	wm_is_using_msix(struct wm_softc *);
    777 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    778 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
    779 static int	wm_setup_legacy(struct wm_softc *);
    780 static int	wm_setup_msix(struct wm_softc *);
    781 static int	wm_init(struct ifnet *);
    782 static int	wm_init_locked(struct ifnet *);
    783 static void	wm_init_sysctls(struct wm_softc *);
    784 static void	wm_unset_stopping_flags(struct wm_softc *);
    785 static void	wm_set_stopping_flags(struct wm_softc *);
    786 static void	wm_stop(struct ifnet *, int);
    787 static void	wm_stop_locked(struct ifnet *, bool, bool);
    788 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    789 static void	wm_82547_txfifo_stall(void *);
    790 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    791 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    792 /* DMA related */
    793 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    794 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    795 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    796 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    797     struct wm_txqueue *);
    798 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    799 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    800 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    801     struct wm_rxqueue *);
    802 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    803 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    804 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    805 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    806 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    807 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    808 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    809     struct wm_txqueue *);
    810 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    811     struct wm_rxqueue *);
    812 static int	wm_alloc_txrx_queues(struct wm_softc *);
    813 static void	wm_free_txrx_queues(struct wm_softc *);
    814 static int	wm_init_txrx_queues(struct wm_softc *);
    815 /* Start */
    816 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    817     struct wm_txsoft *, uint32_t *, uint8_t *);
    818 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    819 static void	wm_start(struct ifnet *);
    820 static void	wm_start_locked(struct ifnet *);
    821 static int	wm_transmit(struct ifnet *, struct mbuf *);
    822 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    823 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    824 		    bool);
    825 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    826     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    827 static void	wm_nq_start(struct ifnet *);
    828 static void	wm_nq_start_locked(struct ifnet *);
    829 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    830 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    831 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    832 		    bool);
    833 static void	wm_deferred_start_locked(struct wm_txqueue *);
    834 static void	wm_handle_queue(void *);
    835 static void	wm_handle_queue_work(struct work *, void *);
    836 /* Interrupt */
    837 static bool	wm_txeof(struct wm_txqueue *, u_int);
    838 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    839 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    840 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    841 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    842 static void	wm_linkintr(struct wm_softc *, uint32_t);
    843 static int	wm_intr_legacy(void *);
    844 static inline void	wm_txrxintr_disable(struct wm_queue *);
    845 static inline void	wm_txrxintr_enable(struct wm_queue *);
    846 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    847 static int	wm_txrxintr_msix(void *);
    848 static int	wm_linkintr_msix(void *);
    849 
    850 /*
    851  * Media related.
    852  * GMII, SGMII, TBI, SERDES and SFP.
    853  */
    854 /* Common */
    855 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    856 /* GMII related */
    857 static void	wm_gmii_reset(struct wm_softc *);
    858 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    859 static int	wm_get_phy_id_82575(struct wm_softc *);
    860 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    861 static int	wm_gmii_mediachange(struct ifnet *);
    862 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    863 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    864 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
    865 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
    866 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
    867 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
    868 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
    869 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
    870 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    871 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
    872 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    873 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
    874 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
    875 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
    876 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
    877 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    878 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
    879 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
    880 	bool);
    881 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
    882 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    883 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
    884 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    885 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
    886 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
    887 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
    888 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
    889 static void	wm_gmii_statchg(struct ifnet *);
    890 /*
    891  * kumeran related (80003, ICH* and PCH*).
    892  * These functions are not for accessing MII registers but for accessing
    893  * kumeran specific registers.
    894  */
    895 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    896 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    897 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    898 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    899 /* EMI register related */
    900 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
    901 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
    902 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
    903 /* SGMII */
    904 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    905 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
    906 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
    907 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
    908 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
    909 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
    910 /* TBI related */
    911 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    912 static void	wm_tbi_mediainit(struct wm_softc *);
    913 static int	wm_tbi_mediachange(struct ifnet *);
    914 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    915 static int	wm_check_for_link(struct wm_softc *);
    916 static void	wm_tbi_tick(struct wm_softc *);
    917 /* SERDES related */
    918 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    919 static int	wm_serdes_mediachange(struct ifnet *);
    920 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    921 static void	wm_serdes_tick(struct wm_softc *);
    922 /* SFP related */
    923 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    924 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    925 
    926 /*
    927  * NVM related.
    928  * Microwire, SPI (w/wo EERD) and Flash.
    929  */
    930 /* Misc functions */
    931 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    932 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    933 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    934 /* Microwire */
    935 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    936 /* SPI */
    937 static int	wm_nvm_ready_spi(struct wm_softc *);
    938 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    939 /* Using with EERD */
    940 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    941 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    942 /* Flash */
    943 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    944     unsigned int *);
    945 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    946 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    947 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    948     uint32_t *);
    949 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    950 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    951 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    952 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    953 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    954 /* iNVM */
    955 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    956 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    957 /* Lock, detecting NVM type, validate checksum and read */
    958 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    959 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    960 static int	wm_nvm_validate_checksum(struct wm_softc *);
    961 static void	wm_nvm_version_invm(struct wm_softc *);
    962 static void	wm_nvm_version(struct wm_softc *);
    963 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    964 
    965 /*
    966  * Hardware semaphores.
    967  * Very complexed...
    968  */
    969 static int	wm_get_null(struct wm_softc *);
    970 static void	wm_put_null(struct wm_softc *);
    971 static int	wm_get_eecd(struct wm_softc *);
    972 static void	wm_put_eecd(struct wm_softc *);
    973 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    974 static void	wm_put_swsm_semaphore(struct wm_softc *);
    975 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    976 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    977 static int	wm_get_nvm_80003(struct wm_softc *);
    978 static void	wm_put_nvm_80003(struct wm_softc *);
    979 static int	wm_get_nvm_82571(struct wm_softc *);
    980 static void	wm_put_nvm_82571(struct wm_softc *);
    981 static int	wm_get_phy_82575(struct wm_softc *);
    982 static void	wm_put_phy_82575(struct wm_softc *);
    983 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    984 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    985 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    986 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    987 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    988 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    989 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    990 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    991 
    992 /*
    993  * Management mode and power management related subroutines.
    994  * BMC, AMT, suspend/resume and EEE.
    995  */
    996 #if 0
    997 static int	wm_check_mng_mode(struct wm_softc *);
    998 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    999 static int	wm_check_mng_mode_82574(struct wm_softc *);
   1000 static int	wm_check_mng_mode_generic(struct wm_softc *);
   1001 #endif
   1002 static int	wm_enable_mng_pass_thru(struct wm_softc *);
   1003 static bool	wm_phy_resetisblocked(struct wm_softc *);
   1004 static void	wm_get_hw_control(struct wm_softc *);
   1005 static void	wm_release_hw_control(struct wm_softc *);
   1006 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
   1007 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
   1008 static void	wm_init_manageability(struct wm_softc *);
   1009 static void	wm_release_manageability(struct wm_softc *);
   1010 static void	wm_get_wakeup(struct wm_softc *);
   1011 static int	wm_ulp_disable(struct wm_softc *);
   1012 static int	wm_enable_phy_wakeup(struct wm_softc *);
   1013 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
   1014 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
   1015 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
   1016 static void	wm_enable_wakeup(struct wm_softc *);
   1017 static void	wm_disable_aspm(struct wm_softc *);
   1018 /* LPLU (Low Power Link Up) */
   1019 static void	wm_lplu_d0_disable(struct wm_softc *);
   1020 /* EEE */
   1021 static int	wm_set_eee_i350(struct wm_softc *);
   1022 static int	wm_set_eee_pchlan(struct wm_softc *);
   1023 static int	wm_set_eee(struct wm_softc *);
   1024 
   1025 /*
   1026  * Workarounds (mainly PHY related).
   1027  * Basically, PHY's workarounds are in the PHY drivers.
   1028  */
   1029 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
   1030 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
   1031 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
   1032 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
   1033 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
   1034 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
   1035 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
   1036 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
   1037 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
   1038 static int	wm_k1_workaround_lv(struct wm_softc *);
   1039 static int	wm_link_stall_workaround_hv(struct wm_softc *);
   1040 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
   1041 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
   1042 static void	wm_reset_init_script_82575(struct wm_softc *);
   1043 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
   1044 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
   1045 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
   1046 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
   1047 static int	wm_pll_workaround_i210(struct wm_softc *);
   1048 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
   1049 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
   1050 static void	wm_set_linkdown_discard(struct wm_softc *);
   1051 static void	wm_clear_linkdown_discard(struct wm_softc *);
   1052 
   1053 #ifdef WM_DEBUG
   1054 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
   1055 #endif
   1056 
   1057 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
   1058     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
   1059 
   1060 /*
   1061  * Devices supported by this driver.
   1062  */
   1063 static const struct wm_product {
   1064 	pci_vendor_id_t		wmp_vendor;
   1065 	pci_product_id_t	wmp_product;
   1066 	const char		*wmp_name;
   1067 	wm_chip_type		wmp_type;
   1068 	uint32_t		wmp_flags;
   1069 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1070 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1071 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1072 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1073 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1074 } wm_products[] = {
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1076 	  "Intel i82542 1000BASE-X Ethernet",
   1077 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1080 	  "Intel i82543GC 1000BASE-X Ethernet",
   1081 	  WM_T_82543,		WMP_F_FIBER },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1084 	  "Intel i82543GC 1000BASE-T Ethernet",
   1085 	  WM_T_82543,		WMP_F_COPPER },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1088 	  "Intel i82544EI 1000BASE-T Ethernet",
   1089 	  WM_T_82544,		WMP_F_COPPER },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1092 	  "Intel i82544EI 1000BASE-X Ethernet",
   1093 	  WM_T_82544,		WMP_F_FIBER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1096 	  "Intel i82544GC 1000BASE-T Ethernet",
   1097 	  WM_T_82544,		WMP_F_COPPER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1100 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1101 	  WM_T_82544,		WMP_F_COPPER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1104 	  "Intel i82540EM 1000BASE-T Ethernet",
   1105 	  WM_T_82540,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1108 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1109 	  WM_T_82540,		WMP_F_COPPER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1112 	  "Intel i82540EP 1000BASE-T Ethernet",
   1113 	  WM_T_82540,		WMP_F_COPPER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1116 	  "Intel i82540EP 1000BASE-T Ethernet",
   1117 	  WM_T_82540,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1120 	  "Intel i82540EP 1000BASE-T Ethernet",
   1121 	  WM_T_82540,		WMP_F_COPPER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1124 	  "Intel i82545EM 1000BASE-T Ethernet",
   1125 	  WM_T_82545,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1128 	  "Intel i82545GM 1000BASE-T Ethernet",
   1129 	  WM_T_82545_3,		WMP_F_COPPER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1132 	  "Intel i82545GM 1000BASE-X Ethernet",
   1133 	  WM_T_82545_3,		WMP_F_FIBER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1136 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1137 	  WM_T_82545_3,		WMP_F_SERDES },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1140 	  "Intel i82546EB 1000BASE-T Ethernet",
   1141 	  WM_T_82546,		WMP_F_COPPER },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1144 	  "Intel i82546EB 1000BASE-T Ethernet",
   1145 	  WM_T_82546,		WMP_F_COPPER },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1148 	  "Intel i82545EM 1000BASE-X Ethernet",
   1149 	  WM_T_82545,		WMP_F_FIBER },
   1150 
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1152 	  "Intel i82546EB 1000BASE-X Ethernet",
   1153 	  WM_T_82546,		WMP_F_FIBER },
   1154 
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1156 	  "Intel i82546GB 1000BASE-T Ethernet",
   1157 	  WM_T_82546_3,		WMP_F_COPPER },
   1158 
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1160 	  "Intel i82546GB 1000BASE-X Ethernet",
   1161 	  WM_T_82546_3,		WMP_F_FIBER },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1164 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1165 	  WM_T_82546_3,		WMP_F_SERDES },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1168 	  "i82546GB quad-port Gigabit Ethernet",
   1169 	  WM_T_82546_3,		WMP_F_COPPER },
   1170 
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1172 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1173 	  WM_T_82546_3,		WMP_F_COPPER },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1176 	  "Intel PRO/1000MT (82546GB)",
   1177 	  WM_T_82546_3,		WMP_F_COPPER },
   1178 
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1180 	  "Intel i82541EI 1000BASE-T Ethernet",
   1181 	  WM_T_82541,		WMP_F_COPPER },
   1182 
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1184 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1185 	  WM_T_82541,		WMP_F_COPPER },
   1186 
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1188 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1189 	  WM_T_82541,		WMP_F_COPPER },
   1190 
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1192 	  "Intel i82541ER 1000BASE-T Ethernet",
   1193 	  WM_T_82541_2,		WMP_F_COPPER },
   1194 
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1196 	  "Intel i82541GI 1000BASE-T Ethernet",
   1197 	  WM_T_82541_2,		WMP_F_COPPER },
   1198 
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1200 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1201 	  WM_T_82541_2,		WMP_F_COPPER },
   1202 
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1204 	  "Intel i82541PI 1000BASE-T Ethernet",
   1205 	  WM_T_82541_2,		WMP_F_COPPER },
   1206 
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1208 	  "Intel i82547EI 1000BASE-T Ethernet",
   1209 	  WM_T_82547,		WMP_F_COPPER },
   1210 
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1212 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1213 	  WM_T_82547,		WMP_F_COPPER },
   1214 
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1216 	  "Intel i82547GI 1000BASE-T Ethernet",
   1217 	  WM_T_82547_2,		WMP_F_COPPER },
   1218 
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1220 	  "Intel PRO/1000 PT (82571EB)",
   1221 	  WM_T_82571,		WMP_F_COPPER },
   1222 
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1224 	  "Intel PRO/1000 PF (82571EB)",
   1225 	  WM_T_82571,		WMP_F_FIBER },
   1226 
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1228 	  "Intel PRO/1000 PB (82571EB)",
   1229 	  WM_T_82571,		WMP_F_SERDES },
   1230 
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1232 	  "Intel PRO/1000 QT (82571EB)",
   1233 	  WM_T_82571,		WMP_F_COPPER },
   1234 
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1236 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1237 	  WM_T_82571,		WMP_F_COPPER },
   1238 
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1240 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1241 	  WM_T_82571,		WMP_F_COPPER },
   1242 
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1244 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1245 	  WM_T_82571,		WMP_F_SERDES },
   1246 
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1248 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1249 	  WM_T_82571,		WMP_F_SERDES },
   1250 
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1252 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1253 	  WM_T_82571,		WMP_F_FIBER },
   1254 
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1256 	  "Intel i82572EI 1000baseT Ethernet",
   1257 	  WM_T_82572,		WMP_F_COPPER },
   1258 
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1260 	  "Intel i82572EI 1000baseX Ethernet",
   1261 	  WM_T_82572,		WMP_F_FIBER },
   1262 
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1264 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1265 	  WM_T_82572,		WMP_F_SERDES },
   1266 
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1268 	  "Intel i82572EI 1000baseT Ethernet",
   1269 	  WM_T_82572,		WMP_F_COPPER },
   1270 
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1272 	  "Intel i82573E",
   1273 	  WM_T_82573,		WMP_F_COPPER },
   1274 
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1276 	  "Intel i82573E IAMT",
   1277 	  WM_T_82573,		WMP_F_COPPER },
   1278 
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1280 	  "Intel i82573L Gigabit Ethernet",
   1281 	  WM_T_82573,		WMP_F_COPPER },
   1282 
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1284 	  "Intel i82574L",
   1285 	  WM_T_82574,		WMP_F_COPPER },
   1286 
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1288 	  "Intel i82574L",
   1289 	  WM_T_82574,		WMP_F_COPPER },
   1290 
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1292 	  "Intel i82583V",
   1293 	  WM_T_82583,		WMP_F_COPPER },
   1294 
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1296 	  "i80003 dual 1000baseT Ethernet",
   1297 	  WM_T_80003,		WMP_F_COPPER },
   1298 
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1300 	  "i80003 dual 1000baseX Ethernet",
   1301 	  WM_T_80003,		WMP_F_COPPER },
   1302 
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1304 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1305 	  WM_T_80003,		WMP_F_SERDES },
   1306 
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1308 	  "Intel i80003 1000baseT Ethernet",
   1309 	  WM_T_80003,		WMP_F_COPPER },
   1310 
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1312 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1313 	  WM_T_80003,		WMP_F_SERDES },
   1314 
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1316 	  "Intel i82801H (M_AMT) LAN Controller",
   1317 	  WM_T_ICH8,		WMP_F_COPPER },
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1319 	  "Intel i82801H (AMT) LAN Controller",
   1320 	  WM_T_ICH8,		WMP_F_COPPER },
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1322 	  "Intel i82801H LAN Controller",
   1323 	  WM_T_ICH8,		WMP_F_COPPER },
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1325 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1326 	  WM_T_ICH8,		WMP_F_COPPER },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1328 	  "Intel i82801H (M) LAN Controller",
   1329 	  WM_T_ICH8,		WMP_F_COPPER },
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1331 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1332 	  WM_T_ICH8,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1334 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1335 	  WM_T_ICH8,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1337 	  "82567V-3 LAN Controller",
   1338 	  WM_T_ICH8,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1340 	  "82801I (AMT) LAN Controller",
   1341 	  WM_T_ICH9,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1343 	  "82801I 10/100 LAN Controller",
   1344 	  WM_T_ICH9,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1346 	  "82801I (G) 10/100 LAN Controller",
   1347 	  WM_T_ICH9,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1349 	  "82801I (GT) 10/100 LAN Controller",
   1350 	  WM_T_ICH9,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1352 	  "82801I (C) LAN Controller",
   1353 	  WM_T_ICH9,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1355 	  "82801I mobile LAN Controller",
   1356 	  WM_T_ICH9,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1358 	  "82801I mobile (V) LAN Controller",
   1359 	  WM_T_ICH9,		WMP_F_COPPER },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1361 	  "82801I mobile (AMT) LAN Controller",
   1362 	  WM_T_ICH9,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1364 	  "82567LM-4 LAN Controller",
   1365 	  WM_T_ICH9,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1367 	  "82567LM-2 LAN Controller",
   1368 	  WM_T_ICH10,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1370 	  "82567LF-2 LAN Controller",
   1371 	  WM_T_ICH10,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1373 	  "82567LM-3 LAN Controller",
   1374 	  WM_T_ICH10,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1376 	  "82567LF-3 LAN Controller",
   1377 	  WM_T_ICH10,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1379 	  "82567V-2 LAN Controller",
   1380 	  WM_T_ICH10,		WMP_F_COPPER },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1382 	  "82567V-3? LAN Controller",
   1383 	  WM_T_ICH10,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1385 	  "HANKSVILLE LAN Controller",
   1386 	  WM_T_ICH10,		WMP_F_COPPER },
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1388 	  "PCH LAN (82577LM) Controller",
   1389 	  WM_T_PCH,		WMP_F_COPPER },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1391 	  "PCH LAN (82577LC) Controller",
   1392 	  WM_T_PCH,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1394 	  "PCH LAN (82578DM) Controller",
   1395 	  WM_T_PCH,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1397 	  "PCH LAN (82578DC) Controller",
   1398 	  WM_T_PCH,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1400 	  "PCH2 LAN (82579LM) Controller",
   1401 	  WM_T_PCH2,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1403 	  "PCH2 LAN (82579V) Controller",
   1404 	  WM_T_PCH2,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1406 	  "82575EB dual-1000baseT Ethernet",
   1407 	  WM_T_82575,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1409 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1410 	  WM_T_82575,		WMP_F_SERDES },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1412 	  "82575GB quad-1000baseT Ethernet",
   1413 	  WM_T_82575,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1415 	  "82575GB quad-1000baseT Ethernet (PM)",
   1416 	  WM_T_82575,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1418 	  "82576 1000BaseT Ethernet",
   1419 	  WM_T_82576,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1421 	  "82576 1000BaseX Ethernet",
   1422 	  WM_T_82576,		WMP_F_FIBER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1425 	  "82576 gigabit Ethernet (SERDES)",
   1426 	  WM_T_82576,		WMP_F_SERDES },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1429 	  "82576 quad-1000BaseT Ethernet",
   1430 	  WM_T_82576,		WMP_F_COPPER },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1433 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1434 	  WM_T_82576,		WMP_F_COPPER },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1437 	  "82576 gigabit Ethernet",
   1438 	  WM_T_82576,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1441 	  "82576 gigabit Ethernet (SERDES)",
   1442 	  WM_T_82576,		WMP_F_SERDES },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1444 	  "82576 quad-gigabit Ethernet (SERDES)",
   1445 	  WM_T_82576,		WMP_F_SERDES },
   1446 
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1448 	  "82580 1000BaseT Ethernet",
   1449 	  WM_T_82580,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1451 	  "82580 1000BaseX Ethernet",
   1452 	  WM_T_82580,		WMP_F_FIBER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1455 	  "82580 1000BaseT Ethernet (SERDES)",
   1456 	  WM_T_82580,		WMP_F_SERDES },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1459 	  "82580 gigabit Ethernet (SGMII)",
   1460 	  WM_T_82580,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1462 	  "82580 dual-1000BaseT Ethernet",
   1463 	  WM_T_82580,		WMP_F_COPPER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1466 	  "82580 quad-1000BaseX Ethernet",
   1467 	  WM_T_82580,		WMP_F_FIBER },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1470 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1471 	  WM_T_82580,		WMP_F_COPPER },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1474 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1475 	  WM_T_82580,		WMP_F_SERDES },
   1476 
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1478 	  "DH89XXCC 1000BASE-KX Ethernet",
   1479 	  WM_T_82580,		WMP_F_SERDES },
   1480 
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1482 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1483 	  WM_T_82580,		WMP_F_SERDES },
   1484 
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1486 	  "I350 Gigabit Network Connection",
   1487 	  WM_T_I350,		WMP_F_COPPER },
   1488 
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1490 	  "I350 Gigabit Fiber Network Connection",
   1491 	  WM_T_I350,		WMP_F_FIBER },
   1492 
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1494 	  "I350 Gigabit Backplane Connection",
   1495 	  WM_T_I350,		WMP_F_SERDES },
   1496 
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1498 	  "I350 Quad Port Gigabit Ethernet",
   1499 	  WM_T_I350,		WMP_F_SERDES },
   1500 
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1502 	  "I350 Gigabit Connection",
   1503 	  WM_T_I350,		WMP_F_COPPER },
   1504 
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1506 	  "I354 Gigabit Ethernet (KX)",
   1507 	  WM_T_I354,		WMP_F_SERDES },
   1508 
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1510 	  "I354 Gigabit Ethernet (SGMII)",
   1511 	  WM_T_I354,		WMP_F_COPPER },
   1512 
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1514 	  "I354 Gigabit Ethernet (2.5G)",
   1515 	  WM_T_I354,		WMP_F_COPPER },
   1516 
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1518 	  "I210-T1 Ethernet Server Adapter",
   1519 	  WM_T_I210,		WMP_F_COPPER },
   1520 
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1522 	  "I210 Ethernet (Copper OEM)",
   1523 	  WM_T_I210,		WMP_F_COPPER },
   1524 
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1526 	  "I210 Ethernet (Copper IT)",
   1527 	  WM_T_I210,		WMP_F_COPPER },
   1528 
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1530 	  "I210 Ethernet (Copper, FLASH less)",
   1531 	  WM_T_I210,		WMP_F_COPPER },
   1532 
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1534 	  "I210 Gigabit Ethernet (Fiber)",
   1535 	  WM_T_I210,		WMP_F_FIBER },
   1536 
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1538 	  "I210 Gigabit Ethernet (SERDES)",
   1539 	  WM_T_I210,		WMP_F_SERDES },
   1540 
   1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1542 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1543 	  WM_T_I210,		WMP_F_SERDES },
   1544 
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1546 	  "I210 Gigabit Ethernet (SGMII)",
   1547 	  WM_T_I210,		WMP_F_COPPER },
   1548 
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1550 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1551 	  WM_T_I210,		WMP_F_COPPER },
   1552 
   1553 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1554 	  "I211 Ethernet (COPPER)",
   1555 	  WM_T_I211,		WMP_F_COPPER },
   1556 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1557 	  "I217 V Ethernet Connection",
   1558 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1560 	  "I217 LM Ethernet Connection",
   1561 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1562 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1563 	  "I218 V Ethernet Connection",
   1564 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1566 	  "I218 V Ethernet Connection",
   1567 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1569 	  "I218 V Ethernet Connection",
   1570 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1572 	  "I218 LM Ethernet Connection",
   1573 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1575 	  "I218 LM Ethernet Connection",
   1576 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1577 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1578 	  "I218 LM Ethernet Connection",
   1579 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1581 	  "I219 LM Ethernet Connection",
   1582 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1584 	  "I219 LM Ethernet Connection",
   1585 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1587 	  "I219 LM Ethernet Connection",
   1588 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1589 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1590 	  "I219 LM Ethernet Connection",
   1591 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1593 	  "I219 LM Ethernet Connection",
   1594 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1596 	  "I219 LM Ethernet Connection",
   1597 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1599 	  "I219 LM Ethernet Connection",
   1600 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1602 	  "I219 LM Ethernet Connection",
   1603 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1605 	  "I219 LM Ethernet Connection",
   1606 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1608 	  "I219 LM Ethernet Connection",
   1609 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1611 	  "I219 LM Ethernet Connection",
   1612 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1614 	  "I219 LM Ethernet Connection",
   1615 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1617 	  "I219 LM Ethernet Connection",
   1618 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1620 	  "I219 LM Ethernet Connection",
   1621 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1623 	  "I219 LM Ethernet Connection",
   1624 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1626 	  "I219 V Ethernet Connection",
   1627 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1629 	  "I219 V Ethernet Connection",
   1630 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1632 	  "I219 V Ethernet Connection",
   1633 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1635 	  "I219 V Ethernet Connection",
   1636 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1638 	  "I219 V Ethernet Connection",
   1639 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1641 	  "I219 V Ethernet Connection",
   1642 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1644 	  "I219 V Ethernet Connection",
   1645 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1647 	  "I219 V Ethernet Connection",
   1648 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1650 	  "I219 V Ethernet Connection",
   1651 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1653 	  "I219 V Ethernet Connection",
   1654 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1656 	  "I219 V Ethernet Connection",
   1657 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1659 	  "I219 V Ethernet Connection",
   1660 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1662 	  "I219 V Ethernet Connection",
   1663 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1664 	{ 0,			0,
   1665 	  NULL,
   1666 	  0,			0 },
   1667 };
   1668 
   1669 /*
   1670  * Register read/write functions.
   1671  * Other than CSR_{READ|WRITE}().
   1672  */
   1673 
   1674 #if 0 /* Not currently used */
   1675 static inline uint32_t
   1676 wm_io_read(struct wm_softc *sc, int reg)
   1677 {
   1678 
   1679 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1680 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1681 }
   1682 #endif
   1683 
   1684 static inline void
   1685 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1686 {
   1687 
   1688 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1689 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1690 }
   1691 
   1692 static inline void
   1693 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1694     uint32_t data)
   1695 {
   1696 	uint32_t regval;
   1697 	int i;
   1698 
   1699 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1700 
   1701 	CSR_WRITE(sc, reg, regval);
   1702 
   1703 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1704 		delay(5);
   1705 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1706 			break;
   1707 	}
   1708 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1709 		aprint_error("%s: WARNING:"
   1710 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1711 		    device_xname(sc->sc_dev), reg);
   1712 	}
   1713 }
   1714 
   1715 static inline void
   1716 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1717 {
   1718 	wa->wa_low = htole32(v & 0xffffffffU);
   1719 	if (sizeof(bus_addr_t) == 8)
   1720 		wa->wa_high = htole32((uint64_t) v >> 32);
   1721 	else
   1722 		wa->wa_high = 0;
   1723 }
   1724 
   1725 /*
   1726  * Descriptor sync/init functions.
   1727  */
   1728 static inline void
   1729 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1730 {
   1731 	struct wm_softc *sc = txq->txq_sc;
   1732 
   1733 	/* If it will wrap around, sync to the end of the ring. */
   1734 	if ((start + num) > WM_NTXDESC(txq)) {
   1735 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1736 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1737 		    (WM_NTXDESC(txq) - start), ops);
   1738 		num -= (WM_NTXDESC(txq) - start);
   1739 		start = 0;
   1740 	}
   1741 
   1742 	/* Now sync whatever is left. */
   1743 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1744 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1745 }
   1746 
   1747 static inline void
   1748 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1749 {
   1750 	struct wm_softc *sc = rxq->rxq_sc;
   1751 
   1752 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1753 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1754 }
   1755 
   1756 static inline void
   1757 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1758 {
   1759 	struct wm_softc *sc = rxq->rxq_sc;
   1760 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1761 	struct mbuf *m = rxs->rxs_mbuf;
   1762 
   1763 	/*
   1764 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1765 	 * so that the payload after the Ethernet header is aligned
   1766 	 * to a 4-byte boundary.
   1767 
   1768 	 * XXX BRAINDAMAGE ALERT!
   1769 	 * The stupid chip uses the same size for every buffer, which
   1770 	 * is set in the Receive Control register.  We are using the 2K
   1771 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1772 	 * reason, we can't "scoot" packets longer than the standard
   1773 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1774 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1775 	 * the upper layer copy the headers.
   1776 	 */
   1777 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1778 
   1779 	if (sc->sc_type == WM_T_82574) {
   1780 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1781 		rxd->erx_data.erxd_addr =
   1782 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1783 		rxd->erx_data.erxd_dd = 0;
   1784 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1785 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1786 
   1787 		rxd->nqrx_data.nrxd_paddr =
   1788 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1789 		/* Currently, split header is not supported. */
   1790 		rxd->nqrx_data.nrxd_haddr = 0;
   1791 	} else {
   1792 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1793 
   1794 		wm_set_dma_addr(&rxd->wrx_addr,
   1795 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1796 		rxd->wrx_len = 0;
   1797 		rxd->wrx_cksum = 0;
   1798 		rxd->wrx_status = 0;
   1799 		rxd->wrx_errors = 0;
   1800 		rxd->wrx_special = 0;
   1801 	}
   1802 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1803 
   1804 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1805 }
   1806 
   1807 /*
   1808  * Device driver interface functions and commonly used functions.
   1809  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1810  */
   1811 
   1812 /* Lookup supported device table */
   1813 static const struct wm_product *
   1814 wm_lookup(const struct pci_attach_args *pa)
   1815 {
   1816 	const struct wm_product *wmp;
   1817 
   1818 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1819 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1820 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1821 			return wmp;
   1822 	}
   1823 	return NULL;
   1824 }
   1825 
   1826 /* The match function (ca_match) */
   1827 static int
   1828 wm_match(device_t parent, cfdata_t cf, void *aux)
   1829 {
   1830 	struct pci_attach_args *pa = aux;
   1831 
   1832 	if (wm_lookup(pa) != NULL)
   1833 		return 1;
   1834 
   1835 	return 0;
   1836 }
   1837 
   1838 /* The attach function (ca_attach) */
   1839 static void
   1840 wm_attach(device_t parent, device_t self, void *aux)
   1841 {
   1842 	struct wm_softc *sc = device_private(self);
   1843 	struct pci_attach_args *pa = aux;
   1844 	prop_dictionary_t dict;
   1845 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1846 	pci_chipset_tag_t pc = pa->pa_pc;
   1847 	int counts[PCI_INTR_TYPE_SIZE];
   1848 	pci_intr_type_t max_type;
   1849 	const char *eetype, *xname;
   1850 	bus_space_tag_t memt;
   1851 	bus_space_handle_t memh;
   1852 	bus_size_t memsize;
   1853 	int memh_valid;
   1854 	int i, error;
   1855 	const struct wm_product *wmp;
   1856 	prop_data_t ea;
   1857 	prop_number_t pn;
   1858 	uint8_t enaddr[ETHER_ADDR_LEN];
   1859 	char buf[256];
   1860 	char wqname[MAXCOMLEN];
   1861 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1862 	pcireg_t preg, memtype;
   1863 	uint16_t eeprom_data, apme_mask;
   1864 	bool force_clear_smbi;
   1865 	uint32_t link_mode;
   1866 	uint32_t reg;
   1867 
   1868 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1869 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1870 #endif
   1871 	sc->sc_dev = self;
   1872 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1873 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1874 	sc->sc_core_stopping = false;
   1875 
   1876 	wmp = wm_lookup(pa);
   1877 #ifdef DIAGNOSTIC
   1878 	if (wmp == NULL) {
   1879 		printf("\n");
   1880 		panic("wm_attach: impossible");
   1881 	}
   1882 #endif
   1883 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1884 
   1885 	sc->sc_pc = pa->pa_pc;
   1886 	sc->sc_pcitag = pa->pa_tag;
   1887 
   1888 	if (pci_dma64_available(pa))
   1889 		sc->sc_dmat = pa->pa_dmat64;
   1890 	else
   1891 		sc->sc_dmat = pa->pa_dmat;
   1892 
   1893 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1894 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1895 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1896 
   1897 	sc->sc_type = wmp->wmp_type;
   1898 
   1899 	/* Set default function pointers */
   1900 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1901 	sc->phy.release = sc->nvm.release = wm_put_null;
   1902 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1903 
   1904 	if (sc->sc_type < WM_T_82543) {
   1905 		if (sc->sc_rev < 2) {
   1906 			aprint_error_dev(sc->sc_dev,
   1907 			    "i82542 must be at least rev. 2\n");
   1908 			return;
   1909 		}
   1910 		if (sc->sc_rev < 3)
   1911 			sc->sc_type = WM_T_82542_2_0;
   1912 	}
   1913 
   1914 	/*
   1915 	 * Disable MSI for Errata:
   1916 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1917 	 *
   1918 	 *  82544: Errata 25
   1919 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1920 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1921 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1922 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1923 	 *
   1924 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1925 	 *
   1926 	 *  82571 & 82572: Errata 63
   1927 	 */
   1928 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1929 	    || (sc->sc_type == WM_T_82572))
   1930 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1931 
   1932 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1933 	    || (sc->sc_type == WM_T_82580)
   1934 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1935 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1936 		sc->sc_flags |= WM_F_NEWQUEUE;
   1937 
   1938 	/* Set device properties (mactype) */
   1939 	dict = device_properties(sc->sc_dev);
   1940 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1941 
   1942 	/*
   1943 	 * Map the device.  All devices support memory-mapped acccess,
   1944 	 * and it is really required for normal operation.
   1945 	 */
   1946 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1947 	switch (memtype) {
   1948 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1949 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1950 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1951 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1952 		break;
   1953 	default:
   1954 		memh_valid = 0;
   1955 		break;
   1956 	}
   1957 
   1958 	if (memh_valid) {
   1959 		sc->sc_st = memt;
   1960 		sc->sc_sh = memh;
   1961 		sc->sc_ss = memsize;
   1962 	} else {
   1963 		aprint_error_dev(sc->sc_dev,
   1964 		    "unable to map device registers\n");
   1965 		return;
   1966 	}
   1967 
   1968 	/*
   1969 	 * In addition, i82544 and later support I/O mapped indirect
   1970 	 * register access.  It is not desirable (nor supported in
   1971 	 * this driver) to use it for normal operation, though it is
   1972 	 * required to work around bugs in some chip versions.
   1973 	 */
   1974 	if (sc->sc_type >= WM_T_82544) {
   1975 		/* First we have to find the I/O BAR. */
   1976 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1977 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1978 			if (memtype == PCI_MAPREG_TYPE_IO)
   1979 				break;
   1980 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1981 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1982 				i += 4;	/* skip high bits, too */
   1983 		}
   1984 		if (i < PCI_MAPREG_END) {
   1985 			/*
   1986 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
   1987 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
   1988 			 * It's no problem because newer chips has no this
   1989 			 * bug.
   1990 			 *
   1991 			 * The i8254x doesn't apparently respond when the
   1992 			 * I/O BAR is 0, which looks somewhat like it's not
   1993 			 * been configured.
   1994 			 */
   1995 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1996 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1997 				aprint_error_dev(sc->sc_dev,
   1998 				    "WARNING: I/O BAR at zero.\n");
   1999 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2000 					0, &sc->sc_iot, &sc->sc_ioh,
   2001 					NULL, &sc->sc_ios) == 0) {
   2002 				sc->sc_flags |= WM_F_IOH_VALID;
   2003 			} else
   2004 				aprint_error_dev(sc->sc_dev,
   2005 				    "WARNING: unable to map I/O space\n");
   2006 		}
   2007 
   2008 	}
   2009 
   2010 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2011 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2012 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2013 	if (sc->sc_type < WM_T_82542_2_1)
   2014 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2015 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2016 
   2017 	/* Power up chip */
   2018 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2019 	    && error != EOPNOTSUPP) {
   2020 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2021 		return;
   2022 	}
   2023 
   2024 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2025 	/*
   2026 	 *  Don't use MSI-X if we can use only one queue to save interrupt
   2027 	 * resource.
   2028 	 */
   2029 	if (sc->sc_nqueues > 1) {
   2030 		max_type = PCI_INTR_TYPE_MSIX;
   2031 		/*
   2032 		 *  82583 has a MSI-X capability in the PCI configuration space
   2033 		 * but it doesn't support it. At least the document doesn't
   2034 		 * say anything about MSI-X.
   2035 		 */
   2036 		counts[PCI_INTR_TYPE_MSIX]
   2037 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2038 	} else {
   2039 		max_type = PCI_INTR_TYPE_MSI;
   2040 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2041 	}
   2042 
   2043 	/* Allocation settings */
   2044 	counts[PCI_INTR_TYPE_MSI] = 1;
   2045 	counts[PCI_INTR_TYPE_INTX] = 1;
   2046 	/* overridden by disable flags */
   2047 	if (wm_disable_msi != 0) {
   2048 		counts[PCI_INTR_TYPE_MSI] = 0;
   2049 		if (wm_disable_msix != 0) {
   2050 			max_type = PCI_INTR_TYPE_INTX;
   2051 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2052 		}
   2053 	} else if (wm_disable_msix != 0) {
   2054 		max_type = PCI_INTR_TYPE_MSI;
   2055 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2056 	}
   2057 
   2058 alloc_retry:
   2059 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2060 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2061 		return;
   2062 	}
   2063 
   2064 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2065 		error = wm_setup_msix(sc);
   2066 		if (error) {
   2067 			pci_intr_release(pc, sc->sc_intrs,
   2068 			    counts[PCI_INTR_TYPE_MSIX]);
   2069 
   2070 			/* Setup for MSI: Disable MSI-X */
   2071 			max_type = PCI_INTR_TYPE_MSI;
   2072 			counts[PCI_INTR_TYPE_MSI] = 1;
   2073 			counts[PCI_INTR_TYPE_INTX] = 1;
   2074 			goto alloc_retry;
   2075 		}
   2076 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2077 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2078 		error = wm_setup_legacy(sc);
   2079 		if (error) {
   2080 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2081 			    counts[PCI_INTR_TYPE_MSI]);
   2082 
   2083 			/* The next try is for INTx: Disable MSI */
   2084 			max_type = PCI_INTR_TYPE_INTX;
   2085 			counts[PCI_INTR_TYPE_INTX] = 1;
   2086 			goto alloc_retry;
   2087 		}
   2088 	} else {
   2089 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2090 		error = wm_setup_legacy(sc);
   2091 		if (error) {
   2092 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2093 			    counts[PCI_INTR_TYPE_INTX]);
   2094 			return;
   2095 		}
   2096 	}
   2097 
   2098 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2099 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2100 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2101 	    WM_WORKQUEUE_FLAGS);
   2102 	if (error) {
   2103 		aprint_error_dev(sc->sc_dev,
   2104 		    "unable to create workqueue\n");
   2105 		goto out;
   2106 	}
   2107 
   2108 	/*
   2109 	 * Check the function ID (unit number of the chip).
   2110 	 */
   2111 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2112 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2113 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2114 	    || (sc->sc_type == WM_T_82580)
   2115 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2116 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2117 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2118 	else
   2119 		sc->sc_funcid = 0;
   2120 
   2121 	/*
   2122 	 * Determine a few things about the bus we're connected to.
   2123 	 */
   2124 	if (sc->sc_type < WM_T_82543) {
   2125 		/* We don't really know the bus characteristics here. */
   2126 		sc->sc_bus_speed = 33;
   2127 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2128 		/*
   2129 		 * CSA (Communication Streaming Architecture) is about as fast
   2130 		 * a 32-bit 66MHz PCI Bus.
   2131 		 */
   2132 		sc->sc_flags |= WM_F_CSA;
   2133 		sc->sc_bus_speed = 66;
   2134 		aprint_verbose_dev(sc->sc_dev,
   2135 		    "Communication Streaming Architecture\n");
   2136 		if (sc->sc_type == WM_T_82547) {
   2137 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2138 			callout_setfunc(&sc->sc_txfifo_ch,
   2139 			    wm_82547_txfifo_stall, sc);
   2140 			aprint_verbose_dev(sc->sc_dev,
   2141 			    "using 82547 Tx FIFO stall work-around\n");
   2142 		}
   2143 	} else if (sc->sc_type >= WM_T_82571) {
   2144 		sc->sc_flags |= WM_F_PCIE;
   2145 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2146 		    && (sc->sc_type != WM_T_ICH10)
   2147 		    && (sc->sc_type != WM_T_PCH)
   2148 		    && (sc->sc_type != WM_T_PCH2)
   2149 		    && (sc->sc_type != WM_T_PCH_LPT)
   2150 		    && (sc->sc_type != WM_T_PCH_SPT)
   2151 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2152 			/* ICH* and PCH* have no PCIe capability registers */
   2153 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2154 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2155 				NULL) == 0)
   2156 				aprint_error_dev(sc->sc_dev,
   2157 				    "unable to find PCIe capability\n");
   2158 		}
   2159 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2160 	} else {
   2161 		reg = CSR_READ(sc, WMREG_STATUS);
   2162 		if (reg & STATUS_BUS64)
   2163 			sc->sc_flags |= WM_F_BUS64;
   2164 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2165 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2166 
   2167 			sc->sc_flags |= WM_F_PCIX;
   2168 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2169 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2170 				aprint_error_dev(sc->sc_dev,
   2171 				    "unable to find PCIX capability\n");
   2172 			else if (sc->sc_type != WM_T_82545_3 &&
   2173 				 sc->sc_type != WM_T_82546_3) {
   2174 				/*
   2175 				 * Work around a problem caused by the BIOS
   2176 				 * setting the max memory read byte count
   2177 				 * incorrectly.
   2178 				 */
   2179 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2180 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2181 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2182 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2183 
   2184 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2185 				    PCIX_CMD_BYTECNT_SHIFT;
   2186 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2187 				    PCIX_STATUS_MAXB_SHIFT;
   2188 				if (bytecnt > maxb) {
   2189 					aprint_verbose_dev(sc->sc_dev,
   2190 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2191 					    512 << bytecnt, 512 << maxb);
   2192 					pcix_cmd = (pcix_cmd &
   2193 					    ~PCIX_CMD_BYTECNT_MASK) |
   2194 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2195 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2196 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2197 					    pcix_cmd);
   2198 				}
   2199 			}
   2200 		}
   2201 		/*
   2202 		 * The quad port adapter is special; it has a PCIX-PCIX
   2203 		 * bridge on the board, and can run the secondary bus at
   2204 		 * a higher speed.
   2205 		 */
   2206 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2207 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2208 								      : 66;
   2209 		} else if (sc->sc_flags & WM_F_PCIX) {
   2210 			switch (reg & STATUS_PCIXSPD_MASK) {
   2211 			case STATUS_PCIXSPD_50_66:
   2212 				sc->sc_bus_speed = 66;
   2213 				break;
   2214 			case STATUS_PCIXSPD_66_100:
   2215 				sc->sc_bus_speed = 100;
   2216 				break;
   2217 			case STATUS_PCIXSPD_100_133:
   2218 				sc->sc_bus_speed = 133;
   2219 				break;
   2220 			default:
   2221 				aprint_error_dev(sc->sc_dev,
   2222 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2223 				    reg & STATUS_PCIXSPD_MASK);
   2224 				sc->sc_bus_speed = 66;
   2225 				break;
   2226 			}
   2227 		} else
   2228 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2229 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2230 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2231 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2232 	}
   2233 
   2234 	/* clear interesting stat counters */
   2235 	CSR_READ(sc, WMREG_COLC);
   2236 	CSR_READ(sc, WMREG_RXERRC);
   2237 
   2238 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2239 	    || (sc->sc_type >= WM_T_ICH8))
   2240 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2241 	if (sc->sc_type >= WM_T_ICH8)
   2242 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2243 
   2244 	/* Set PHY, NVM mutex related stuff */
   2245 	switch (sc->sc_type) {
   2246 	case WM_T_82542_2_0:
   2247 	case WM_T_82542_2_1:
   2248 	case WM_T_82543:
   2249 	case WM_T_82544:
   2250 		/* Microwire */
   2251 		sc->nvm.read = wm_nvm_read_uwire;
   2252 		sc->sc_nvm_wordsize = 64;
   2253 		sc->sc_nvm_addrbits = 6;
   2254 		break;
   2255 	case WM_T_82540:
   2256 	case WM_T_82545:
   2257 	case WM_T_82545_3:
   2258 	case WM_T_82546:
   2259 	case WM_T_82546_3:
   2260 		/* Microwire */
   2261 		sc->nvm.read = wm_nvm_read_uwire;
   2262 		reg = CSR_READ(sc, WMREG_EECD);
   2263 		if (reg & EECD_EE_SIZE) {
   2264 			sc->sc_nvm_wordsize = 256;
   2265 			sc->sc_nvm_addrbits = 8;
   2266 		} else {
   2267 			sc->sc_nvm_wordsize = 64;
   2268 			sc->sc_nvm_addrbits = 6;
   2269 		}
   2270 		sc->sc_flags |= WM_F_LOCK_EECD;
   2271 		sc->nvm.acquire = wm_get_eecd;
   2272 		sc->nvm.release = wm_put_eecd;
   2273 		break;
   2274 	case WM_T_82541:
   2275 	case WM_T_82541_2:
   2276 	case WM_T_82547:
   2277 	case WM_T_82547_2:
   2278 		reg = CSR_READ(sc, WMREG_EECD);
   2279 		/*
   2280 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
   2281 		 * on 8254[17], so set flags and functios before calling it.
   2282 		 */
   2283 		sc->sc_flags |= WM_F_LOCK_EECD;
   2284 		sc->nvm.acquire = wm_get_eecd;
   2285 		sc->nvm.release = wm_put_eecd;
   2286 		if (reg & EECD_EE_TYPE) {
   2287 			/* SPI */
   2288 			sc->nvm.read = wm_nvm_read_spi;
   2289 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2290 			wm_nvm_set_addrbits_size_eecd(sc);
   2291 		} else {
   2292 			/* Microwire */
   2293 			sc->nvm.read = wm_nvm_read_uwire;
   2294 			if ((reg & EECD_EE_ABITS) != 0) {
   2295 				sc->sc_nvm_wordsize = 256;
   2296 				sc->sc_nvm_addrbits = 8;
   2297 			} else {
   2298 				sc->sc_nvm_wordsize = 64;
   2299 				sc->sc_nvm_addrbits = 6;
   2300 			}
   2301 		}
   2302 		break;
   2303 	case WM_T_82571:
   2304 	case WM_T_82572:
   2305 		/* SPI */
   2306 		sc->nvm.read = wm_nvm_read_eerd;
   2307 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2308 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2309 		wm_nvm_set_addrbits_size_eecd(sc);
   2310 		sc->phy.acquire = wm_get_swsm_semaphore;
   2311 		sc->phy.release = wm_put_swsm_semaphore;
   2312 		sc->nvm.acquire = wm_get_nvm_82571;
   2313 		sc->nvm.release = wm_put_nvm_82571;
   2314 		break;
   2315 	case WM_T_82573:
   2316 	case WM_T_82574:
   2317 	case WM_T_82583:
   2318 		sc->nvm.read = wm_nvm_read_eerd;
   2319 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2320 		if (sc->sc_type == WM_T_82573) {
   2321 			sc->phy.acquire = wm_get_swsm_semaphore;
   2322 			sc->phy.release = wm_put_swsm_semaphore;
   2323 			sc->nvm.acquire = wm_get_nvm_82571;
   2324 			sc->nvm.release = wm_put_nvm_82571;
   2325 		} else {
   2326 			/* Both PHY and NVM use the same semaphore. */
   2327 			sc->phy.acquire = sc->nvm.acquire
   2328 			    = wm_get_swfwhw_semaphore;
   2329 			sc->phy.release = sc->nvm.release
   2330 			    = wm_put_swfwhw_semaphore;
   2331 		}
   2332 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2333 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2334 			sc->sc_nvm_wordsize = 2048;
   2335 		} else {
   2336 			/* SPI */
   2337 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2338 			wm_nvm_set_addrbits_size_eecd(sc);
   2339 		}
   2340 		break;
   2341 	case WM_T_82575:
   2342 	case WM_T_82576:
   2343 	case WM_T_82580:
   2344 	case WM_T_I350:
   2345 	case WM_T_I354:
   2346 	case WM_T_80003:
   2347 		/* SPI */
   2348 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2349 		wm_nvm_set_addrbits_size_eecd(sc);
   2350 		if ((sc->sc_type == WM_T_80003)
   2351 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2352 			sc->nvm.read = wm_nvm_read_eerd;
   2353 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2354 		} else {
   2355 			sc->nvm.read = wm_nvm_read_spi;
   2356 			sc->sc_flags |= WM_F_LOCK_EECD;
   2357 		}
   2358 		sc->phy.acquire = wm_get_phy_82575;
   2359 		sc->phy.release = wm_put_phy_82575;
   2360 		sc->nvm.acquire = wm_get_nvm_80003;
   2361 		sc->nvm.release = wm_put_nvm_80003;
   2362 		break;
   2363 	case WM_T_ICH8:
   2364 	case WM_T_ICH9:
   2365 	case WM_T_ICH10:
   2366 	case WM_T_PCH:
   2367 	case WM_T_PCH2:
   2368 	case WM_T_PCH_LPT:
   2369 		sc->nvm.read = wm_nvm_read_ich8;
   2370 		/* FLASH */
   2371 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2372 		sc->sc_nvm_wordsize = 2048;
   2373 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2374 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2375 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2376 			aprint_error_dev(sc->sc_dev,
   2377 			    "can't map FLASH registers\n");
   2378 			goto out;
   2379 		}
   2380 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2381 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2382 		    ICH_FLASH_SECTOR_SIZE;
   2383 		sc->sc_ich8_flash_bank_size =
   2384 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2385 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2386 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2387 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2388 		sc->sc_flashreg_offset = 0;
   2389 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2390 		sc->phy.release = wm_put_swflag_ich8lan;
   2391 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2392 		sc->nvm.release = wm_put_nvm_ich8lan;
   2393 		break;
   2394 	case WM_T_PCH_SPT:
   2395 	case WM_T_PCH_CNP:
   2396 		sc->nvm.read = wm_nvm_read_spt;
   2397 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2398 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2399 		sc->sc_flasht = sc->sc_st;
   2400 		sc->sc_flashh = sc->sc_sh;
   2401 		sc->sc_ich8_flash_base = 0;
   2402 		sc->sc_nvm_wordsize =
   2403 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2404 		    * NVM_SIZE_MULTIPLIER;
   2405 		/* It is size in bytes, we want words */
   2406 		sc->sc_nvm_wordsize /= 2;
   2407 		/* Assume 2 banks */
   2408 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2409 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2410 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2411 		sc->phy.release = wm_put_swflag_ich8lan;
   2412 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2413 		sc->nvm.release = wm_put_nvm_ich8lan;
   2414 		break;
   2415 	case WM_T_I210:
   2416 	case WM_T_I211:
   2417 		/* Allow a single clear of the SW semaphore on I210 and newer*/
   2418 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2419 		if (wm_nvm_flash_presence_i210(sc)) {
   2420 			sc->nvm.read = wm_nvm_read_eerd;
   2421 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2422 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2423 			wm_nvm_set_addrbits_size_eecd(sc);
   2424 		} else {
   2425 			sc->nvm.read = wm_nvm_read_invm;
   2426 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2427 			sc->sc_nvm_wordsize = INVM_SIZE;
   2428 		}
   2429 		sc->phy.acquire = wm_get_phy_82575;
   2430 		sc->phy.release = wm_put_phy_82575;
   2431 		sc->nvm.acquire = wm_get_nvm_80003;
   2432 		sc->nvm.release = wm_put_nvm_80003;
   2433 		break;
   2434 	default:
   2435 		break;
   2436 	}
   2437 
   2438 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2439 	switch (sc->sc_type) {
   2440 	case WM_T_82571:
   2441 	case WM_T_82572:
   2442 		reg = CSR_READ(sc, WMREG_SWSM2);
   2443 		if ((reg & SWSM2_LOCK) == 0) {
   2444 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2445 			force_clear_smbi = true;
   2446 		} else
   2447 			force_clear_smbi = false;
   2448 		break;
   2449 	case WM_T_82573:
   2450 	case WM_T_82574:
   2451 	case WM_T_82583:
   2452 		force_clear_smbi = true;
   2453 		break;
   2454 	default:
   2455 		force_clear_smbi = false;
   2456 		break;
   2457 	}
   2458 	if (force_clear_smbi) {
   2459 		reg = CSR_READ(sc, WMREG_SWSM);
   2460 		if ((reg & SWSM_SMBI) != 0)
   2461 			aprint_error_dev(sc->sc_dev,
   2462 			    "Please update the Bootagent\n");
   2463 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2464 	}
   2465 
   2466 	/*
   2467 	 * Defer printing the EEPROM type until after verifying the checksum
   2468 	 * This allows the EEPROM type to be printed correctly in the case
   2469 	 * that no EEPROM is attached.
   2470 	 */
   2471 	/*
   2472 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2473 	 * this for later, so we can fail future reads from the EEPROM.
   2474 	 */
   2475 	if (wm_nvm_validate_checksum(sc)) {
   2476 		/*
   2477 		 * Read twice again because some PCI-e parts fail the
   2478 		 * first check due to the link being in sleep state.
   2479 		 */
   2480 		if (wm_nvm_validate_checksum(sc))
   2481 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2482 	}
   2483 
   2484 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2485 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2486 	else {
   2487 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2488 		    sc->sc_nvm_wordsize);
   2489 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2490 			aprint_verbose("iNVM");
   2491 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2492 			aprint_verbose("FLASH(HW)");
   2493 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2494 			aprint_verbose("FLASH");
   2495 		else {
   2496 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2497 				eetype = "SPI";
   2498 			else
   2499 				eetype = "MicroWire";
   2500 			aprint_verbose("(%d address bits) %s EEPROM",
   2501 			    sc->sc_nvm_addrbits, eetype);
   2502 		}
   2503 	}
   2504 	wm_nvm_version(sc);
   2505 	aprint_verbose("\n");
   2506 
   2507 	/*
   2508 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2509 	 * incorrect.
   2510 	 */
   2511 	wm_gmii_setup_phytype(sc, 0, 0);
   2512 
   2513 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2514 	switch (sc->sc_type) {
   2515 	case WM_T_ICH8:
   2516 	case WM_T_ICH9:
   2517 	case WM_T_ICH10:
   2518 	case WM_T_PCH:
   2519 	case WM_T_PCH2:
   2520 	case WM_T_PCH_LPT:
   2521 	case WM_T_PCH_SPT:
   2522 	case WM_T_PCH_CNP:
   2523 		apme_mask = WUC_APME;
   2524 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2525 		if ((eeprom_data & apme_mask) != 0)
   2526 			sc->sc_flags |= WM_F_WOL;
   2527 		break;
   2528 	default:
   2529 		break;
   2530 	}
   2531 
   2532 	/* Reset the chip to a known state. */
   2533 	wm_reset(sc);
   2534 
   2535 	/*
   2536 	 * Check for I21[01] PLL workaround.
   2537 	 *
   2538 	 * Three cases:
   2539 	 * a) Chip is I211.
   2540 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2541 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2542 	 */
   2543 	if (sc->sc_type == WM_T_I211)
   2544 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2545 	if (sc->sc_type == WM_T_I210) {
   2546 		if (!wm_nvm_flash_presence_i210(sc))
   2547 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2548 		else if ((sc->sc_nvm_ver_major < 3)
   2549 		    || ((sc->sc_nvm_ver_major == 3)
   2550 			&& (sc->sc_nvm_ver_minor < 25))) {
   2551 			aprint_verbose_dev(sc->sc_dev,
   2552 			    "ROM image version %d.%d is older than 3.25\n",
   2553 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2554 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2555 		}
   2556 	}
   2557 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2558 		wm_pll_workaround_i210(sc);
   2559 
   2560 	wm_get_wakeup(sc);
   2561 
   2562 	/* Non-AMT based hardware can now take control from firmware */
   2563 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2564 		wm_get_hw_control(sc);
   2565 
   2566 	/*
   2567 	 * Read the Ethernet address from the EEPROM, if not first found
   2568 	 * in device properties.
   2569 	 */
   2570 	ea = prop_dictionary_get(dict, "mac-address");
   2571 	if (ea != NULL) {
   2572 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2573 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2574 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2575 	} else {
   2576 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2577 			aprint_error_dev(sc->sc_dev,
   2578 			    "unable to read Ethernet address\n");
   2579 			goto out;
   2580 		}
   2581 	}
   2582 
   2583 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2584 	    ether_sprintf(enaddr));
   2585 
   2586 	/*
   2587 	 * Read the config info from the EEPROM, and set up various
   2588 	 * bits in the control registers based on their contents.
   2589 	 */
   2590 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2591 	if (pn != NULL) {
   2592 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2593 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2594 	} else {
   2595 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2596 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2597 			goto out;
   2598 		}
   2599 	}
   2600 
   2601 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2602 	if (pn != NULL) {
   2603 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2604 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2605 	} else {
   2606 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2607 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2608 			goto out;
   2609 		}
   2610 	}
   2611 
   2612 	/* check for WM_F_WOL */
   2613 	switch (sc->sc_type) {
   2614 	case WM_T_82542_2_0:
   2615 	case WM_T_82542_2_1:
   2616 	case WM_T_82543:
   2617 		/* dummy? */
   2618 		eeprom_data = 0;
   2619 		apme_mask = NVM_CFG3_APME;
   2620 		break;
   2621 	case WM_T_82544:
   2622 		apme_mask = NVM_CFG2_82544_APM_EN;
   2623 		eeprom_data = cfg2;
   2624 		break;
   2625 	case WM_T_82546:
   2626 	case WM_T_82546_3:
   2627 	case WM_T_82571:
   2628 	case WM_T_82572:
   2629 	case WM_T_82573:
   2630 	case WM_T_82574:
   2631 	case WM_T_82583:
   2632 	case WM_T_80003:
   2633 	case WM_T_82575:
   2634 	case WM_T_82576:
   2635 		apme_mask = NVM_CFG3_APME;
   2636 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2637 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2638 		break;
   2639 	case WM_T_82580:
   2640 	case WM_T_I350:
   2641 	case WM_T_I354:
   2642 	case WM_T_I210:
   2643 	case WM_T_I211:
   2644 		apme_mask = NVM_CFG3_APME;
   2645 		wm_nvm_read(sc,
   2646 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2647 		    1, &eeprom_data);
   2648 		break;
   2649 	case WM_T_ICH8:
   2650 	case WM_T_ICH9:
   2651 	case WM_T_ICH10:
   2652 	case WM_T_PCH:
   2653 	case WM_T_PCH2:
   2654 	case WM_T_PCH_LPT:
   2655 	case WM_T_PCH_SPT:
   2656 	case WM_T_PCH_CNP:
   2657 		/* Already checked before wm_reset () */
   2658 		apme_mask = eeprom_data = 0;
   2659 		break;
   2660 	default: /* XXX 82540 */
   2661 		apme_mask = NVM_CFG3_APME;
   2662 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2663 		break;
   2664 	}
   2665 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2666 	if ((eeprom_data & apme_mask) != 0)
   2667 		sc->sc_flags |= WM_F_WOL;
   2668 
   2669 	/*
   2670 	 * We have the eeprom settings, now apply the special cases
   2671 	 * where the eeprom may be wrong or the board won't support
   2672 	 * wake on lan on a particular port
   2673 	 */
   2674 	switch (sc->sc_pcidevid) {
   2675 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2676 		sc->sc_flags &= ~WM_F_WOL;
   2677 		break;
   2678 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2679 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2680 		/* Wake events only supported on port A for dual fiber
   2681 		 * regardless of eeprom setting */
   2682 		if (sc->sc_funcid == 1)
   2683 			sc->sc_flags &= ~WM_F_WOL;
   2684 		break;
   2685 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2686 		/* If quad port adapter, disable WoL on all but port A */
   2687 		if (sc->sc_funcid != 0)
   2688 			sc->sc_flags &= ~WM_F_WOL;
   2689 		break;
   2690 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2691 		/* Wake events only supported on port A for dual fiber
   2692 		 * regardless of eeprom setting */
   2693 		if (sc->sc_funcid == 1)
   2694 			sc->sc_flags &= ~WM_F_WOL;
   2695 		break;
   2696 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2697 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2698 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2699 		/* If quad port adapter, disable WoL on all but port A */
   2700 		if (sc->sc_funcid != 0)
   2701 			sc->sc_flags &= ~WM_F_WOL;
   2702 		break;
   2703 	}
   2704 
   2705 	if (sc->sc_type >= WM_T_82575) {
   2706 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2707 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2708 			    nvmword);
   2709 			if ((sc->sc_type == WM_T_82575) ||
   2710 			    (sc->sc_type == WM_T_82576)) {
   2711 				/* Check NVM for autonegotiation */
   2712 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2713 				    != 0)
   2714 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2715 			}
   2716 			if ((sc->sc_type == WM_T_82575) ||
   2717 			    (sc->sc_type == WM_T_I350)) {
   2718 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2719 					sc->sc_flags |= WM_F_MAS;
   2720 			}
   2721 		}
   2722 	}
   2723 
   2724 	/*
   2725 	 * XXX need special handling for some multiple port cards
   2726 	 * to disable a paticular port.
   2727 	 */
   2728 
   2729 	if (sc->sc_type >= WM_T_82544) {
   2730 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2731 		if (pn != NULL) {
   2732 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2733 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2734 		} else {
   2735 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2736 				aprint_error_dev(sc->sc_dev,
   2737 				    "unable to read SWDPIN\n");
   2738 				goto out;
   2739 			}
   2740 		}
   2741 	}
   2742 
   2743 	if (cfg1 & NVM_CFG1_ILOS)
   2744 		sc->sc_ctrl |= CTRL_ILOS;
   2745 
   2746 	/*
   2747 	 * XXX
   2748 	 * This code isn't correct because pin 2 and 3 are located
   2749 	 * in different position on newer chips. Check all datasheet.
   2750 	 *
   2751 	 * Until resolve this problem, check if a chip < 82580
   2752 	 */
   2753 	if (sc->sc_type <= WM_T_82580) {
   2754 		if (sc->sc_type >= WM_T_82544) {
   2755 			sc->sc_ctrl |=
   2756 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2757 			    CTRL_SWDPIO_SHIFT;
   2758 			sc->sc_ctrl |=
   2759 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2760 			    CTRL_SWDPINS_SHIFT;
   2761 		} else {
   2762 			sc->sc_ctrl |=
   2763 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2764 			    CTRL_SWDPIO_SHIFT;
   2765 		}
   2766 	}
   2767 
   2768 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2769 		wm_nvm_read(sc,
   2770 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2771 		    1, &nvmword);
   2772 		if (nvmword & NVM_CFG3_ILOS)
   2773 			sc->sc_ctrl |= CTRL_ILOS;
   2774 	}
   2775 
   2776 #if 0
   2777 	if (sc->sc_type >= WM_T_82544) {
   2778 		if (cfg1 & NVM_CFG1_IPS0)
   2779 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2780 		if (cfg1 & NVM_CFG1_IPS1)
   2781 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2782 		sc->sc_ctrl_ext |=
   2783 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2784 		    CTRL_EXT_SWDPIO_SHIFT;
   2785 		sc->sc_ctrl_ext |=
   2786 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2787 		    CTRL_EXT_SWDPINS_SHIFT;
   2788 	} else {
   2789 		sc->sc_ctrl_ext |=
   2790 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2791 		    CTRL_EXT_SWDPIO_SHIFT;
   2792 	}
   2793 #endif
   2794 
   2795 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2796 #if 0
   2797 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2798 #endif
   2799 
   2800 	if (sc->sc_type == WM_T_PCH) {
   2801 		uint16_t val;
   2802 
   2803 		/* Save the NVM K1 bit setting */
   2804 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2805 
   2806 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2807 			sc->sc_nvm_k1_enabled = 1;
   2808 		else
   2809 			sc->sc_nvm_k1_enabled = 0;
   2810 	}
   2811 
   2812 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2813 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2814 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2815 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2816 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2817 	    || sc->sc_type == WM_T_82573
   2818 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2819 		/* Copper only */
   2820 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2821 	    || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
   2822 	    || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
   2823 	    || (sc->sc_type ==WM_T_I211)) {
   2824 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2825 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2826 		switch (link_mode) {
   2827 		case CTRL_EXT_LINK_MODE_1000KX:
   2828 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2829 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2830 			break;
   2831 		case CTRL_EXT_LINK_MODE_SGMII:
   2832 			if (wm_sgmii_uses_mdio(sc)) {
   2833 				aprint_normal_dev(sc->sc_dev,
   2834 				    "SGMII(MDIO)\n");
   2835 				sc->sc_flags |= WM_F_SGMII;
   2836 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2837 				break;
   2838 			}
   2839 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2840 			/*FALLTHROUGH*/
   2841 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2842 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2843 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2844 				if (link_mode
   2845 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2846 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2847 					sc->sc_flags |= WM_F_SGMII;
   2848 					aprint_verbose_dev(sc->sc_dev,
   2849 					    "SGMII\n");
   2850 				} else {
   2851 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2852 					aprint_verbose_dev(sc->sc_dev,
   2853 					    "SERDES\n");
   2854 				}
   2855 				break;
   2856 			}
   2857 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2858 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2859 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2860 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2861 				sc->sc_flags |= WM_F_SGMII;
   2862 			}
   2863 			/* Do not change link mode for 100BaseFX */
   2864 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2865 				break;
   2866 
   2867 			/* Change current link mode setting */
   2868 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2869 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2870 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2871 			else
   2872 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2873 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2874 			break;
   2875 		case CTRL_EXT_LINK_MODE_GMII:
   2876 		default:
   2877 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2878 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2879 			break;
   2880 		}
   2881 
   2882 		reg &= ~CTRL_EXT_I2C_ENA;
   2883 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2884 			reg |= CTRL_EXT_I2C_ENA;
   2885 		else
   2886 			reg &= ~CTRL_EXT_I2C_ENA;
   2887 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2888 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2889 			if (!wm_sgmii_uses_mdio(sc))
   2890 				wm_gmii_setup_phytype(sc, 0, 0);
   2891 			wm_reset_mdicnfg_82580(sc);
   2892 		}
   2893 	} else if (sc->sc_type < WM_T_82543 ||
   2894 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2895 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2896 			aprint_error_dev(sc->sc_dev,
   2897 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2898 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2899 		}
   2900 	} else {
   2901 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2902 			aprint_error_dev(sc->sc_dev,
   2903 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2904 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2905 		}
   2906 	}
   2907 
   2908 	if (sc->sc_type >= WM_T_PCH2)
   2909 		sc->sc_flags |= WM_F_EEE;
   2910 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2911 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2912 		/* XXX: Need special handling for I354. (not yet) */
   2913 		if (sc->sc_type != WM_T_I354)
   2914 			sc->sc_flags |= WM_F_EEE;
   2915 	}
   2916 
   2917 	/*
   2918 	 * The I350 has a bug where it always strips the CRC whether
   2919 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   2920 	 */
   2921 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2922 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2923 		sc->sc_flags |= WM_F_CRC_STRIP;
   2924 
   2925 	/* Set device properties (macflags) */
   2926 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2927 
   2928 	if (sc->sc_flags != 0) {
   2929 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2930 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2931 	}
   2932 
   2933 #ifdef WM_MPSAFE
   2934 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2935 #else
   2936 	sc->sc_core_lock = NULL;
   2937 #endif
   2938 
   2939 	/* Initialize the media structures accordingly. */
   2940 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2941 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2942 	else
   2943 		wm_tbi_mediainit(sc); /* All others */
   2944 
   2945 	ifp = &sc->sc_ethercom.ec_if;
   2946 	xname = device_xname(sc->sc_dev);
   2947 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2948 	ifp->if_softc = sc;
   2949 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2950 #ifdef WM_MPSAFE
   2951 	ifp->if_extflags = IFEF_MPSAFE;
   2952 #endif
   2953 	ifp->if_ioctl = wm_ioctl;
   2954 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2955 		ifp->if_start = wm_nq_start;
   2956 		/*
   2957 		 * When the number of CPUs is one and the controller can use
   2958 		 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
   2959 		 * That is, wm(4) use two interrupts, one is used for Tx/Rx
   2960 		 * and the other is used for link status changing.
   2961 		 * In this situation, wm_nq_transmit() is disadvantageous
   2962 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2963 		 */
   2964 		if (wm_is_using_multiqueue(sc))
   2965 			ifp->if_transmit = wm_nq_transmit;
   2966 	} else {
   2967 		ifp->if_start = wm_start;
   2968 		/*
   2969 		 * wm_transmit() has the same disadvantage as wm_transmit().
   2970 		 */
   2971 		if (wm_is_using_multiqueue(sc))
   2972 			ifp->if_transmit = wm_transmit;
   2973 	}
   2974 	/* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */
   2975 	ifp->if_init = wm_init;
   2976 	ifp->if_stop = wm_stop;
   2977 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2978 	IFQ_SET_READY(&ifp->if_snd);
   2979 
   2980 	/* Check for jumbo frame */
   2981 	switch (sc->sc_type) {
   2982 	case WM_T_82573:
   2983 		/* XXX limited to 9234 if ASPM is disabled */
   2984 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2985 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2986 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2987 		break;
   2988 	case WM_T_82571:
   2989 	case WM_T_82572:
   2990 	case WM_T_82574:
   2991 	case WM_T_82583:
   2992 	case WM_T_82575:
   2993 	case WM_T_82576:
   2994 	case WM_T_82580:
   2995 	case WM_T_I350:
   2996 	case WM_T_I354:
   2997 	case WM_T_I210:
   2998 	case WM_T_I211:
   2999 	case WM_T_80003:
   3000 	case WM_T_ICH9:
   3001 	case WM_T_ICH10:
   3002 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3003 	case WM_T_PCH_LPT:
   3004 	case WM_T_PCH_SPT:
   3005 	case WM_T_PCH_CNP:
   3006 		/* XXX limited to 9234 */
   3007 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3008 		break;
   3009 	case WM_T_PCH:
   3010 		/* XXX limited to 4096 */
   3011 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3012 		break;
   3013 	case WM_T_82542_2_0:
   3014 	case WM_T_82542_2_1:
   3015 	case WM_T_ICH8:
   3016 		/* No support for jumbo frame */
   3017 		break;
   3018 	default:
   3019 		/* ETHER_MAX_LEN_JUMBO */
   3020 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3021 		break;
   3022 	}
   3023 
   3024 	/* If we're a i82543 or greater, we can support VLANs. */
   3025 	if (sc->sc_type >= WM_T_82543) {
   3026 		sc->sc_ethercom.ec_capabilities |=
   3027 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3028 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3029 	}
   3030 
   3031 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3032 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3033 
   3034 	/*
   3035 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   3036 	 * on i82543 and later.
   3037 	 */
   3038 	if (sc->sc_type >= WM_T_82543) {
   3039 		ifp->if_capabilities |=
   3040 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3041 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3042 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3043 		    IFCAP_CSUM_TCPv6_Tx |
   3044 		    IFCAP_CSUM_UDPv6_Tx;
   3045 	}
   3046 
   3047 	/*
   3048 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3049 	 *
   3050 	 *	82541GI (8086:1076) ... no
   3051 	 *	82572EI (8086:10b9) ... yes
   3052 	 */
   3053 	if (sc->sc_type >= WM_T_82571) {
   3054 		ifp->if_capabilities |=
   3055 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3056 	}
   3057 
   3058 	/*
   3059 	 * If we're a i82544 or greater (except i82547), we can do
   3060 	 * TCP segmentation offload.
   3061 	 */
   3062 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3063 		ifp->if_capabilities |= IFCAP_TSOv4;
   3064 	}
   3065 
   3066 	if (sc->sc_type >= WM_T_82571) {
   3067 		ifp->if_capabilities |= IFCAP_TSOv6;
   3068 	}
   3069 
   3070 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3071 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3072 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3073 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3074 
   3075 	/* Attach the interface. */
   3076 	error = if_initialize(ifp);
   3077 	if (error != 0) {
   3078 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3079 		    error);
   3080 		return; /* Error */
   3081 	}
   3082 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3083 	ether_ifattach(ifp, enaddr);
   3084 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3085 	if_register(ifp);
   3086 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3087 	    RND_FLAG_DEFAULT);
   3088 
   3089 #ifdef WM_EVENT_COUNTERS
   3090 	/* Attach event counters. */
   3091 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3092 	    NULL, xname, "linkintr");
   3093 
   3094 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3095 	    NULL, xname, "tx_xoff");
   3096 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3097 	    NULL, xname, "tx_xon");
   3098 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3099 	    NULL, xname, "rx_xoff");
   3100 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3101 	    NULL, xname, "rx_xon");
   3102 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3103 	    NULL, xname, "rx_macctl");
   3104 #endif /* WM_EVENT_COUNTERS */
   3105 
   3106 	sc->sc_txrx_use_workqueue = false;
   3107 
   3108 	if (wm_phy_need_linkdown_discard(sc))
   3109 		wm_set_linkdown_discard(sc);
   3110 
   3111 	wm_init_sysctls(sc);
   3112 
   3113 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3114 		pmf_class_network_register(self, ifp);
   3115 	else
   3116 		aprint_error_dev(self, "couldn't establish power handler\n");
   3117 
   3118 	sc->sc_flags |= WM_F_ATTACHED;
   3119 out:
   3120 	return;
   3121 }
   3122 
   3123 /* The detach function (ca_detach) */
   3124 static int
   3125 wm_detach(device_t self, int flags __unused)
   3126 {
   3127 	struct wm_softc *sc = device_private(self);
   3128 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3129 	int i;
   3130 
   3131 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3132 		return 0;
   3133 
   3134 	/* Stop the interface. Callouts are stopped in it. */
   3135 	wm_stop(ifp, 1);
   3136 
   3137 	pmf_device_deregister(self);
   3138 
   3139 	sysctl_teardown(&sc->sc_sysctllog);
   3140 
   3141 #ifdef WM_EVENT_COUNTERS
   3142 	evcnt_detach(&sc->sc_ev_linkintr);
   3143 
   3144 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3145 	evcnt_detach(&sc->sc_ev_tx_xon);
   3146 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3147 	evcnt_detach(&sc->sc_ev_rx_xon);
   3148 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3149 #endif /* WM_EVENT_COUNTERS */
   3150 
   3151 	rnd_detach_source(&sc->rnd_source);
   3152 
   3153 	/* Tell the firmware about the release */
   3154 	WM_CORE_LOCK(sc);
   3155 	wm_release_manageability(sc);
   3156 	wm_release_hw_control(sc);
   3157 	wm_enable_wakeup(sc);
   3158 	WM_CORE_UNLOCK(sc);
   3159 
   3160 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3161 
   3162 	ether_ifdetach(ifp);
   3163 	if_detach(ifp);
   3164 	if_percpuq_destroy(sc->sc_ipq);
   3165 
   3166 	/* Delete all remaining media. */
   3167 	ifmedia_fini(&sc->sc_mii.mii_media);
   3168 
   3169 	/* Unload RX dmamaps and free mbufs */
   3170 	for (i = 0; i < sc->sc_nqueues; i++) {
   3171 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3172 		mutex_enter(rxq->rxq_lock);
   3173 		wm_rxdrain(rxq);
   3174 		mutex_exit(rxq->rxq_lock);
   3175 	}
   3176 	/* Must unlock here */
   3177 
   3178 	/* Disestablish the interrupt handler */
   3179 	for (i = 0; i < sc->sc_nintrs; i++) {
   3180 		if (sc->sc_ihs[i] != NULL) {
   3181 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3182 			sc->sc_ihs[i] = NULL;
   3183 		}
   3184 	}
   3185 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3186 
   3187 	/* wm_stop() ensure workqueue is stopped. */
   3188 	workqueue_destroy(sc->sc_queue_wq);
   3189 
   3190 	for (i = 0; i < sc->sc_nqueues; i++)
   3191 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3192 
   3193 	wm_free_txrx_queues(sc);
   3194 
   3195 	/* Unmap the registers */
   3196 	if (sc->sc_ss) {
   3197 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3198 		sc->sc_ss = 0;
   3199 	}
   3200 	if (sc->sc_ios) {
   3201 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3202 		sc->sc_ios = 0;
   3203 	}
   3204 	if (sc->sc_flashs) {
   3205 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3206 		sc->sc_flashs = 0;
   3207 	}
   3208 
   3209 	if (sc->sc_core_lock)
   3210 		mutex_obj_free(sc->sc_core_lock);
   3211 	if (sc->sc_ich_phymtx)
   3212 		mutex_obj_free(sc->sc_ich_phymtx);
   3213 	if (sc->sc_ich_nvmmtx)
   3214 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3215 
   3216 	return 0;
   3217 }
   3218 
   3219 static bool
   3220 wm_suspend(device_t self, const pmf_qual_t *qual)
   3221 {
   3222 	struct wm_softc *sc = device_private(self);
   3223 
   3224 	wm_release_manageability(sc);
   3225 	wm_release_hw_control(sc);
   3226 	wm_enable_wakeup(sc);
   3227 
   3228 	return true;
   3229 }
   3230 
   3231 static bool
   3232 wm_resume(device_t self, const pmf_qual_t *qual)
   3233 {
   3234 	struct wm_softc *sc = device_private(self);
   3235 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3236 	pcireg_t reg;
   3237 	char buf[256];
   3238 
   3239 	reg = CSR_READ(sc, WMREG_WUS);
   3240 	if (reg != 0) {
   3241 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3242 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3243 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3244 	}
   3245 
   3246 	if (sc->sc_type >= WM_T_PCH2)
   3247 		wm_resume_workarounds_pchlan(sc);
   3248 	if ((ifp->if_flags & IFF_UP) == 0) {
   3249 		wm_reset(sc);
   3250 		/* Non-AMT based hardware can now take control from firmware */
   3251 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3252 			wm_get_hw_control(sc);
   3253 		wm_init_manageability(sc);
   3254 	} else {
   3255 		/*
   3256 		 * We called pmf_class_network_register(), so if_init() is
   3257 		 * automatically called when IFF_UP. wm_reset(),
   3258 		 * wm_get_hw_control() and wm_init_manageability() are called
   3259 		 * via wm_init().
   3260 		 */
   3261 	}
   3262 
   3263 	return true;
   3264 }
   3265 
   3266 /*
   3267  * wm_watchdog:		[ifnet interface function]
   3268  *
   3269  *	Watchdog timer handler.
   3270  */
   3271 static void
   3272 wm_watchdog(struct ifnet *ifp)
   3273 {
   3274 	int qid;
   3275 	struct wm_softc *sc = ifp->if_softc;
   3276 	uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
   3277 
   3278 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3279 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3280 
   3281 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3282 	}
   3283 
   3284 	/* IF any of queues hanged up, reset the interface. */
   3285 	if (hang_queue != 0) {
   3286 		(void)wm_init(ifp);
   3287 
   3288 		/*
   3289 		 * There are still some upper layer processing which call
   3290 		 * ifp->if_start(). e.g. ALTQ or one CPU system
   3291 		 */
   3292 		/* Try to get more packets going. */
   3293 		ifp->if_start(ifp);
   3294 	}
   3295 }
   3296 
   3297 
   3298 static void
   3299 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3300 {
   3301 
   3302 	mutex_enter(txq->txq_lock);
   3303 	if (txq->txq_sending &&
   3304 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3305 		wm_watchdog_txq_locked(ifp, txq, hang);
   3306 
   3307 	mutex_exit(txq->txq_lock);
   3308 }
   3309 
   3310 static void
   3311 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3312     uint16_t *hang)
   3313 {
   3314 	struct wm_softc *sc = ifp->if_softc;
   3315 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3316 
   3317 	KASSERT(mutex_owned(txq->txq_lock));
   3318 
   3319 	/*
   3320 	 * Since we're using delayed interrupts, sweep up
   3321 	 * before we report an error.
   3322 	 */
   3323 	wm_txeof(txq, UINT_MAX);
   3324 
   3325 	if (txq->txq_sending)
   3326 		*hang |= __BIT(wmq->wmq_id);
   3327 
   3328 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3329 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3330 		    device_xname(sc->sc_dev));
   3331 	} else {
   3332 #ifdef WM_DEBUG
   3333 		int i, j;
   3334 		struct wm_txsoft *txs;
   3335 #endif
   3336 		log(LOG_ERR,
   3337 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3338 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3339 		    txq->txq_next);
   3340 		if_statinc(ifp, if_oerrors);
   3341 #ifdef WM_DEBUG
   3342 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3343 		    i = WM_NEXTTXS(txq, i)) {
   3344 			txs = &txq->txq_soft[i];
   3345 			printf("txs %d tx %d -> %d\n",
   3346 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3347 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3348 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3349 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3350 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3351 					printf("\t %#08x%08x\n",
   3352 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3353 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3354 				} else {
   3355 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3356 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3357 					    txq->txq_descs[j].wtx_addr.wa_low);
   3358 					printf("\t %#04x%02x%02x%08x\n",
   3359 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3360 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3361 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3362 					    txq->txq_descs[j].wtx_cmdlen);
   3363 				}
   3364 				if (j == txs->txs_lastdesc)
   3365 					break;
   3366 			}
   3367 		}
   3368 #endif
   3369 	}
   3370 }
   3371 
   3372 /*
   3373  * wm_tick:
   3374  *
   3375  *	One second timer, used to check link status, sweep up
   3376  *	completed transmit jobs, etc.
   3377  */
   3378 static void
   3379 wm_tick(void *arg)
   3380 {
   3381 	struct wm_softc *sc = arg;
   3382 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3383 #ifndef WM_MPSAFE
   3384 	int s = splnet();
   3385 #endif
   3386 
   3387 	WM_CORE_LOCK(sc);
   3388 
   3389 	if (sc->sc_core_stopping) {
   3390 		WM_CORE_UNLOCK(sc);
   3391 #ifndef WM_MPSAFE
   3392 		splx(s);
   3393 #endif
   3394 		return;
   3395 	}
   3396 
   3397 	if (sc->sc_type >= WM_T_82542_2_1) {
   3398 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3399 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3400 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3401 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3402 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3403 	}
   3404 
   3405 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3406 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3407 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3408 	    + CSR_READ(sc, WMREG_CRCERRS)
   3409 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3410 	    + CSR_READ(sc, WMREG_SYMERRC)
   3411 	    + CSR_READ(sc, WMREG_RXERRC)
   3412 	    + CSR_READ(sc, WMREG_SEC)
   3413 	    + CSR_READ(sc, WMREG_CEXTERR)
   3414 	    + CSR_READ(sc, WMREG_RLEC));
   3415 	/*
   3416 	 * WMREG_RNBC is incremented when there is no available buffers in host
   3417 	 * memory. It does not mean the number of dropped packet. Because
   3418 	 * ethernet controller can receive packets in such case if there is
   3419 	 * space in phy's FIFO.
   3420 	 *
   3421 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
   3422 	 * own EVCNT instead of if_iqdrops.
   3423 	 */
   3424 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3425 	IF_STAT_PUTREF(ifp);
   3426 
   3427 	if (sc->sc_flags & WM_F_HAS_MII)
   3428 		mii_tick(&sc->sc_mii);
   3429 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3430 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3431 		wm_serdes_tick(sc);
   3432 	else
   3433 		wm_tbi_tick(sc);
   3434 
   3435 	WM_CORE_UNLOCK(sc);
   3436 
   3437 	wm_watchdog(ifp);
   3438 
   3439 	callout_schedule(&sc->sc_tick_ch, hz);
   3440 }
   3441 
   3442 static int
   3443 wm_ifflags_cb(struct ethercom *ec)
   3444 {
   3445 	struct ifnet *ifp = &ec->ec_if;
   3446 	struct wm_softc *sc = ifp->if_softc;
   3447 	u_short iffchange;
   3448 	int ecchange;
   3449 	bool needreset = false;
   3450 	int rc = 0;
   3451 
   3452 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3453 		device_xname(sc->sc_dev), __func__));
   3454 
   3455 	WM_CORE_LOCK(sc);
   3456 
   3457 	/*
   3458 	 * Check for if_flags.
   3459 	 * Main usage is to prevent linkdown when opening bpf.
   3460 	 */
   3461 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3462 	sc->sc_if_flags = ifp->if_flags;
   3463 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3464 		needreset = true;
   3465 		goto ec;
   3466 	}
   3467 
   3468 	/* iff related updates */
   3469 	if ((iffchange & IFF_PROMISC) != 0)
   3470 		wm_set_filter(sc);
   3471 
   3472 	wm_set_vlan(sc);
   3473 
   3474 ec:
   3475 	/* Check for ec_capenable. */
   3476 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3477 	sc->sc_ec_capenable = ec->ec_capenable;
   3478 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3479 		needreset = true;
   3480 		goto out;
   3481 	}
   3482 
   3483 	/* ec related updates */
   3484 	wm_set_eee(sc);
   3485 
   3486 out:
   3487 	if (needreset)
   3488 		rc = ENETRESET;
   3489 	WM_CORE_UNLOCK(sc);
   3490 
   3491 	return rc;
   3492 }
   3493 
   3494 static bool
   3495 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3496 {
   3497 
   3498 	switch(sc->sc_phytype) {
   3499 	case WMPHY_82577: /* ihphy */
   3500 	case WMPHY_82578: /* atphy */
   3501 	case WMPHY_82579: /* ihphy */
   3502 	case WMPHY_I217: /* ihphy */
   3503 	case WMPHY_82580: /* ihphy */
   3504 	case WMPHY_I350: /* ihphy */
   3505 		return true;
   3506 	default:
   3507 		return false;
   3508 	}
   3509 }
   3510 
   3511 static void
   3512 wm_set_linkdown_discard(struct wm_softc *sc)
   3513 {
   3514 
   3515 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3516 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3517 
   3518 		mutex_enter(txq->txq_lock);
   3519 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3520 		mutex_exit(txq->txq_lock);
   3521 	}
   3522 }
   3523 
   3524 static void
   3525 wm_clear_linkdown_discard(struct wm_softc *sc)
   3526 {
   3527 
   3528 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3529 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3530 
   3531 		mutex_enter(txq->txq_lock);
   3532 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3533 		mutex_exit(txq->txq_lock);
   3534 	}
   3535 }
   3536 
   3537 /*
   3538  * wm_ioctl:		[ifnet interface function]
   3539  *
   3540  *	Handle control requests from the operator.
   3541  */
   3542 static int
   3543 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3544 {
   3545 	struct wm_softc *sc = ifp->if_softc;
   3546 	struct ifreq *ifr = (struct ifreq *)data;
   3547 	struct ifaddr *ifa = (struct ifaddr *)data;
   3548 	struct sockaddr_dl *sdl;
   3549 	int s, error;
   3550 
   3551 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3552 		device_xname(sc->sc_dev), __func__));
   3553 
   3554 #ifndef WM_MPSAFE
   3555 	s = splnet();
   3556 #endif
   3557 	switch (cmd) {
   3558 	case SIOCSIFMEDIA:
   3559 		WM_CORE_LOCK(sc);
   3560 		/* Flow control requires full-duplex mode. */
   3561 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3562 		    (ifr->ifr_media & IFM_FDX) == 0)
   3563 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3564 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3565 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3566 				/* We can do both TXPAUSE and RXPAUSE. */
   3567 				ifr->ifr_media |=
   3568 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3569 			}
   3570 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3571 		}
   3572 		WM_CORE_UNLOCK(sc);
   3573 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3574 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3575 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
   3576 				wm_set_linkdown_discard(sc);
   3577 			else
   3578 				wm_clear_linkdown_discard(sc);
   3579 		}
   3580 		break;
   3581 	case SIOCINITIFADDR:
   3582 		WM_CORE_LOCK(sc);
   3583 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3584 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3585 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3586 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3587 			/* Unicast address is the first multicast entry */
   3588 			wm_set_filter(sc);
   3589 			error = 0;
   3590 			WM_CORE_UNLOCK(sc);
   3591 			break;
   3592 		}
   3593 		WM_CORE_UNLOCK(sc);
   3594 		if (((ifp->if_flags & IFF_UP) == 0) && wm_phy_need_linkdown_discard(sc))
   3595 			wm_clear_linkdown_discard(sc);
   3596 		/*FALLTHROUGH*/
   3597 	default:
   3598 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   3599 			if (((ifp->if_flags & IFF_UP) == 0) && ((ifr->ifr_flags & IFF_UP) != 0)) {
   3600 				wm_clear_linkdown_discard(sc);
   3601 			} else if (((ifp->if_flags & IFF_UP) != 0) && ((ifr->ifr_flags & IFF_UP) == 0)) {
   3602 				wm_set_linkdown_discard(sc);
   3603 			}
   3604 		}
   3605 #ifdef WM_MPSAFE
   3606 		s = splnet();
   3607 #endif
   3608 		/* It may call wm_start, so unlock here */
   3609 		error = ether_ioctl(ifp, cmd, data);
   3610 #ifdef WM_MPSAFE
   3611 		splx(s);
   3612 #endif
   3613 		if (error != ENETRESET)
   3614 			break;
   3615 
   3616 		error = 0;
   3617 
   3618 		if (cmd == SIOCSIFCAP)
   3619 			error = (*ifp->if_init)(ifp);
   3620 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3621 			;
   3622 		else if (ifp->if_flags & IFF_RUNNING) {
   3623 			/*
   3624 			 * Multicast list has changed; set the hardware filter
   3625 			 * accordingly.
   3626 			 */
   3627 			WM_CORE_LOCK(sc);
   3628 			wm_set_filter(sc);
   3629 			WM_CORE_UNLOCK(sc);
   3630 		}
   3631 		break;
   3632 	}
   3633 
   3634 #ifndef WM_MPSAFE
   3635 	splx(s);
   3636 #endif
   3637 	return error;
   3638 }
   3639 
   3640 /* MAC address related */
   3641 
   3642 /*
   3643  * Get the offset of MAC address and return it.
   3644  * If error occured, use offset 0.
   3645  */
   3646 static uint16_t
   3647 wm_check_alt_mac_addr(struct wm_softc *sc)
   3648 {
   3649 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3650 	uint16_t offset = NVM_OFF_MACADDR;
   3651 
   3652 	/* Try to read alternative MAC address pointer */
   3653 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3654 		return 0;
   3655 
   3656 	/* Check pointer if it's valid or not. */
   3657 	if ((offset == 0x0000) || (offset == 0xffff))
   3658 		return 0;
   3659 
   3660 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3661 	/*
   3662 	 * Check whether alternative MAC address is valid or not.
   3663 	 * Some cards have non 0xffff pointer but those don't use
   3664 	 * alternative MAC address in reality.
   3665 	 *
   3666 	 * Check whether the broadcast bit is set or not.
   3667 	 */
   3668 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3669 		if (((myea[0] & 0xff) & 0x01) == 0)
   3670 			return offset; /* Found */
   3671 
   3672 	/* Not found */
   3673 	return 0;
   3674 }
   3675 
   3676 static int
   3677 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3678 {
   3679 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3680 	uint16_t offset = NVM_OFF_MACADDR;
   3681 	int do_invert = 0;
   3682 
   3683 	switch (sc->sc_type) {
   3684 	case WM_T_82580:
   3685 	case WM_T_I350:
   3686 	case WM_T_I354:
   3687 		/* EEPROM Top Level Partitioning */
   3688 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3689 		break;
   3690 	case WM_T_82571:
   3691 	case WM_T_82575:
   3692 	case WM_T_82576:
   3693 	case WM_T_80003:
   3694 	case WM_T_I210:
   3695 	case WM_T_I211:
   3696 		offset = wm_check_alt_mac_addr(sc);
   3697 		if (offset == 0)
   3698 			if ((sc->sc_funcid & 0x01) == 1)
   3699 				do_invert = 1;
   3700 		break;
   3701 	default:
   3702 		if ((sc->sc_funcid & 0x01) == 1)
   3703 			do_invert = 1;
   3704 		break;
   3705 	}
   3706 
   3707 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3708 		goto bad;
   3709 
   3710 	enaddr[0] = myea[0] & 0xff;
   3711 	enaddr[1] = myea[0] >> 8;
   3712 	enaddr[2] = myea[1] & 0xff;
   3713 	enaddr[3] = myea[1] >> 8;
   3714 	enaddr[4] = myea[2] & 0xff;
   3715 	enaddr[5] = myea[2] >> 8;
   3716 
   3717 	/*
   3718 	 * Toggle the LSB of the MAC address on the second port
   3719 	 * of some dual port cards.
   3720 	 */
   3721 	if (do_invert != 0)
   3722 		enaddr[5] ^= 1;
   3723 
   3724 	return 0;
   3725 
   3726  bad:
   3727 	return -1;
   3728 }
   3729 
   3730 /*
   3731  * wm_set_ral:
   3732  *
   3733  *	Set an entery in the receive address list.
   3734  */
   3735 static void
   3736 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3737 {
   3738 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3739 	uint32_t wlock_mac;
   3740 	int rv;
   3741 
   3742 	if (enaddr != NULL) {
   3743 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3744 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3745 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3746 		ral_hi |= RAL_AV;
   3747 	} else {
   3748 		ral_lo = 0;
   3749 		ral_hi = 0;
   3750 	}
   3751 
   3752 	switch (sc->sc_type) {
   3753 	case WM_T_82542_2_0:
   3754 	case WM_T_82542_2_1:
   3755 	case WM_T_82543:
   3756 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3757 		CSR_WRITE_FLUSH(sc);
   3758 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3759 		CSR_WRITE_FLUSH(sc);
   3760 		break;
   3761 	case WM_T_PCH2:
   3762 	case WM_T_PCH_LPT:
   3763 	case WM_T_PCH_SPT:
   3764 	case WM_T_PCH_CNP:
   3765 		if (idx == 0) {
   3766 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3767 			CSR_WRITE_FLUSH(sc);
   3768 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3769 			CSR_WRITE_FLUSH(sc);
   3770 			return;
   3771 		}
   3772 		if (sc->sc_type != WM_T_PCH2) {
   3773 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3774 			    FWSM_WLOCK_MAC);
   3775 			addrl = WMREG_SHRAL(idx - 1);
   3776 			addrh = WMREG_SHRAH(idx - 1);
   3777 		} else {
   3778 			wlock_mac = 0;
   3779 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3780 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3781 		}
   3782 
   3783 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3784 			rv = wm_get_swflag_ich8lan(sc);
   3785 			if (rv != 0)
   3786 				return;
   3787 			CSR_WRITE(sc, addrl, ral_lo);
   3788 			CSR_WRITE_FLUSH(sc);
   3789 			CSR_WRITE(sc, addrh, ral_hi);
   3790 			CSR_WRITE_FLUSH(sc);
   3791 			wm_put_swflag_ich8lan(sc);
   3792 		}
   3793 
   3794 		break;
   3795 	default:
   3796 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3797 		CSR_WRITE_FLUSH(sc);
   3798 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3799 		CSR_WRITE_FLUSH(sc);
   3800 		break;
   3801 	}
   3802 }
   3803 
   3804 /*
   3805  * wm_mchash:
   3806  *
   3807  *	Compute the hash of the multicast address for the 4096-bit
   3808  *	multicast filter.
   3809  */
   3810 static uint32_t
   3811 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3812 {
   3813 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3814 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3815 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3816 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3817 	uint32_t hash;
   3818 
   3819 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3820 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3821 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3822 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3823 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3824 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3825 		return (hash & 0x3ff);
   3826 	}
   3827 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3828 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3829 
   3830 	return (hash & 0xfff);
   3831 }
   3832 
   3833 /*
   3834  *
   3835  *
   3836  */
   3837 static int
   3838 wm_rar_count(struct wm_softc *sc)
   3839 {
   3840 	int size;
   3841 
   3842 	switch (sc->sc_type) {
   3843 	case WM_T_ICH8:
   3844 		size = WM_RAL_TABSIZE_ICH8 -1;
   3845 		break;
   3846 	case WM_T_ICH9:
   3847 	case WM_T_ICH10:
   3848 	case WM_T_PCH:
   3849 		size = WM_RAL_TABSIZE_ICH8;
   3850 		break;
   3851 	case WM_T_PCH2:
   3852 		size = WM_RAL_TABSIZE_PCH2;
   3853 		break;
   3854 	case WM_T_PCH_LPT:
   3855 	case WM_T_PCH_SPT:
   3856 	case WM_T_PCH_CNP:
   3857 		size = WM_RAL_TABSIZE_PCH_LPT;
   3858 		break;
   3859 	case WM_T_82575:
   3860 	case WM_T_I210:
   3861 	case WM_T_I211:
   3862 		size = WM_RAL_TABSIZE_82575;
   3863 		break;
   3864 	case WM_T_82576:
   3865 	case WM_T_82580:
   3866 		size = WM_RAL_TABSIZE_82576;
   3867 		break;
   3868 	case WM_T_I350:
   3869 	case WM_T_I354:
   3870 		size = WM_RAL_TABSIZE_I350;
   3871 		break;
   3872 	default:
   3873 		size = WM_RAL_TABSIZE;
   3874 	}
   3875 
   3876 	return size;
   3877 }
   3878 
   3879 /*
   3880  * wm_set_filter:
   3881  *
   3882  *	Set up the receive filter.
   3883  */
   3884 static void
   3885 wm_set_filter(struct wm_softc *sc)
   3886 {
   3887 	struct ethercom *ec = &sc->sc_ethercom;
   3888 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3889 	struct ether_multi *enm;
   3890 	struct ether_multistep step;
   3891 	bus_addr_t mta_reg;
   3892 	uint32_t hash, reg, bit;
   3893 	int i, size, ralmax, rv;
   3894 
   3895 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3896 		device_xname(sc->sc_dev), __func__));
   3897 
   3898 	if (sc->sc_type >= WM_T_82544)
   3899 		mta_reg = WMREG_CORDOVA_MTA;
   3900 	else
   3901 		mta_reg = WMREG_MTA;
   3902 
   3903 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3904 
   3905 	if (ifp->if_flags & IFF_BROADCAST)
   3906 		sc->sc_rctl |= RCTL_BAM;
   3907 	if (ifp->if_flags & IFF_PROMISC) {
   3908 		sc->sc_rctl |= RCTL_UPE;
   3909 		ETHER_LOCK(ec);
   3910 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3911 		ETHER_UNLOCK(ec);
   3912 		goto allmulti;
   3913 	}
   3914 
   3915 	/*
   3916 	 * Set the station address in the first RAL slot, and
   3917 	 * clear the remaining slots.
   3918 	 */
   3919 	size = wm_rar_count(sc);
   3920 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3921 
   3922 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3923 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3924 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3925 		switch (i) {
   3926 		case 0:
   3927 			/* We can use all entries */
   3928 			ralmax = size;
   3929 			break;
   3930 		case 1:
   3931 			/* Only RAR[0] */
   3932 			ralmax = 1;
   3933 			break;
   3934 		default:
   3935 			/* Available SHRA + RAR[0] */
   3936 			ralmax = i + 1;
   3937 		}
   3938 	} else
   3939 		ralmax = size;
   3940 	for (i = 1; i < size; i++) {
   3941 		if (i < ralmax)
   3942 			wm_set_ral(sc, NULL, i);
   3943 	}
   3944 
   3945 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3946 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3947 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3948 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3949 		size = WM_ICH8_MC_TABSIZE;
   3950 	else
   3951 		size = WM_MC_TABSIZE;
   3952 	/* Clear out the multicast table. */
   3953 	for (i = 0; i < size; i++) {
   3954 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3955 		CSR_WRITE_FLUSH(sc);
   3956 	}
   3957 
   3958 	ETHER_LOCK(ec);
   3959 	ETHER_FIRST_MULTI(step, ec, enm);
   3960 	while (enm != NULL) {
   3961 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3962 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3963 			ETHER_UNLOCK(ec);
   3964 			/*
   3965 			 * We must listen to a range of multicast addresses.
   3966 			 * For now, just accept all multicasts, rather than
   3967 			 * trying to set only those filter bits needed to match
   3968 			 * the range.  (At this time, the only use of address
   3969 			 * ranges is for IP multicast routing, for which the
   3970 			 * range is big enough to require all bits set.)
   3971 			 */
   3972 			goto allmulti;
   3973 		}
   3974 
   3975 		hash = wm_mchash(sc, enm->enm_addrlo);
   3976 
   3977 		reg = (hash >> 5);
   3978 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3979 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3980 		    || (sc->sc_type == WM_T_PCH2)
   3981 		    || (sc->sc_type == WM_T_PCH_LPT)
   3982 		    || (sc->sc_type == WM_T_PCH_SPT)
   3983 		    || (sc->sc_type == WM_T_PCH_CNP))
   3984 			reg &= 0x1f;
   3985 		else
   3986 			reg &= 0x7f;
   3987 		bit = hash & 0x1f;
   3988 
   3989 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3990 		hash |= 1U << bit;
   3991 
   3992 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3993 			/*
   3994 			 * 82544 Errata 9: Certain register cannot be written
   3995 			 * with particular alignments in PCI-X bus operation
   3996 			 * (FCAH, MTA and VFTA).
   3997 			 */
   3998 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3999 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4000 			CSR_WRITE_FLUSH(sc);
   4001 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4002 			CSR_WRITE_FLUSH(sc);
   4003 		} else {
   4004 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4005 			CSR_WRITE_FLUSH(sc);
   4006 		}
   4007 
   4008 		ETHER_NEXT_MULTI(step, enm);
   4009 	}
   4010 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4011 	ETHER_UNLOCK(ec);
   4012 
   4013 	goto setit;
   4014 
   4015  allmulti:
   4016 	sc->sc_rctl |= RCTL_MPE;
   4017 
   4018  setit:
   4019 	if (sc->sc_type >= WM_T_PCH2) {
   4020 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4021 		    && (ifp->if_mtu > ETHERMTU))
   4022 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4023 		else
   4024 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4025 		if (rv != 0)
   4026 			device_printf(sc->sc_dev,
   4027 			    "Failed to do workaround for jumbo frame.\n");
   4028 	}
   4029 
   4030 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4031 }
   4032 
   4033 /* Reset and init related */
   4034 
   4035 static void
   4036 wm_set_vlan(struct wm_softc *sc)
   4037 {
   4038 
   4039 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4040 		device_xname(sc->sc_dev), __func__));
   4041 
   4042 	/* Deal with VLAN enables. */
   4043 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4044 		sc->sc_ctrl |= CTRL_VME;
   4045 	else
   4046 		sc->sc_ctrl &= ~CTRL_VME;
   4047 
   4048 	/* Write the control registers. */
   4049 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4050 }
   4051 
   4052 static void
   4053 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4054 {
   4055 	uint32_t gcr;
   4056 	pcireg_t ctrl2;
   4057 
   4058 	gcr = CSR_READ(sc, WMREG_GCR);
   4059 
   4060 	/* Only take action if timeout value is defaulted to 0 */
   4061 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4062 		goto out;
   4063 
   4064 	if ((gcr & GCR_CAP_VER2) == 0) {
   4065 		gcr |= GCR_CMPL_TMOUT_10MS;
   4066 		goto out;
   4067 	}
   4068 
   4069 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4070 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4071 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4072 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4073 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4074 
   4075 out:
   4076 	/* Disable completion timeout resend */
   4077 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4078 
   4079 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4080 }
   4081 
   4082 void
   4083 wm_get_auto_rd_done(struct wm_softc *sc)
   4084 {
   4085 	int i;
   4086 
   4087 	/* wait for eeprom to reload */
   4088 	switch (sc->sc_type) {
   4089 	case WM_T_82571:
   4090 	case WM_T_82572:
   4091 	case WM_T_82573:
   4092 	case WM_T_82574:
   4093 	case WM_T_82583:
   4094 	case WM_T_82575:
   4095 	case WM_T_82576:
   4096 	case WM_T_82580:
   4097 	case WM_T_I350:
   4098 	case WM_T_I354:
   4099 	case WM_T_I210:
   4100 	case WM_T_I211:
   4101 	case WM_T_80003:
   4102 	case WM_T_ICH8:
   4103 	case WM_T_ICH9:
   4104 		for (i = 0; i < 10; i++) {
   4105 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4106 				break;
   4107 			delay(1000);
   4108 		}
   4109 		if (i == 10) {
   4110 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4111 			    "complete\n", device_xname(sc->sc_dev));
   4112 		}
   4113 		break;
   4114 	default:
   4115 		break;
   4116 	}
   4117 }
   4118 
   4119 void
   4120 wm_lan_init_done(struct wm_softc *sc)
   4121 {
   4122 	uint32_t reg = 0;
   4123 	int i;
   4124 
   4125 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4126 		device_xname(sc->sc_dev), __func__));
   4127 
   4128 	/* Wait for eeprom to reload */
   4129 	switch (sc->sc_type) {
   4130 	case WM_T_ICH10:
   4131 	case WM_T_PCH:
   4132 	case WM_T_PCH2:
   4133 	case WM_T_PCH_LPT:
   4134 	case WM_T_PCH_SPT:
   4135 	case WM_T_PCH_CNP:
   4136 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4137 			reg = CSR_READ(sc, WMREG_STATUS);
   4138 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4139 				break;
   4140 			delay(100);
   4141 		}
   4142 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4143 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4144 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4145 		}
   4146 		break;
   4147 	default:
   4148 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4149 		    __func__);
   4150 		break;
   4151 	}
   4152 
   4153 	reg &= ~STATUS_LAN_INIT_DONE;
   4154 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4155 }
   4156 
   4157 void
   4158 wm_get_cfg_done(struct wm_softc *sc)
   4159 {
   4160 	int mask;
   4161 	uint32_t reg;
   4162 	int i;
   4163 
   4164 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4165 		device_xname(sc->sc_dev), __func__));
   4166 
   4167 	/* Wait for eeprom to reload */
   4168 	switch (sc->sc_type) {
   4169 	case WM_T_82542_2_0:
   4170 	case WM_T_82542_2_1:
   4171 		/* null */
   4172 		break;
   4173 	case WM_T_82543:
   4174 	case WM_T_82544:
   4175 	case WM_T_82540:
   4176 	case WM_T_82545:
   4177 	case WM_T_82545_3:
   4178 	case WM_T_82546:
   4179 	case WM_T_82546_3:
   4180 	case WM_T_82541:
   4181 	case WM_T_82541_2:
   4182 	case WM_T_82547:
   4183 	case WM_T_82547_2:
   4184 	case WM_T_82573:
   4185 	case WM_T_82574:
   4186 	case WM_T_82583:
   4187 		/* generic */
   4188 		delay(10*1000);
   4189 		break;
   4190 	case WM_T_80003:
   4191 	case WM_T_82571:
   4192 	case WM_T_82572:
   4193 	case WM_T_82575:
   4194 	case WM_T_82576:
   4195 	case WM_T_82580:
   4196 	case WM_T_I350:
   4197 	case WM_T_I354:
   4198 	case WM_T_I210:
   4199 	case WM_T_I211:
   4200 		if (sc->sc_type == WM_T_82571) {
   4201 			/* Only 82571 shares port 0 */
   4202 			mask = EEMNGCTL_CFGDONE_0;
   4203 		} else
   4204 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4205 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4206 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4207 				break;
   4208 			delay(1000);
   4209 		}
   4210 		if (i >= WM_PHY_CFG_TIMEOUT)
   4211 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4212 				device_xname(sc->sc_dev), __func__));
   4213 		break;
   4214 	case WM_T_ICH8:
   4215 	case WM_T_ICH9:
   4216 	case WM_T_ICH10:
   4217 	case WM_T_PCH:
   4218 	case WM_T_PCH2:
   4219 	case WM_T_PCH_LPT:
   4220 	case WM_T_PCH_SPT:
   4221 	case WM_T_PCH_CNP:
   4222 		delay(10*1000);
   4223 		if (sc->sc_type >= WM_T_ICH10)
   4224 			wm_lan_init_done(sc);
   4225 		else
   4226 			wm_get_auto_rd_done(sc);
   4227 
   4228 		/* Clear PHY Reset Asserted bit */
   4229 		reg = CSR_READ(sc, WMREG_STATUS);
   4230 		if ((reg & STATUS_PHYRA) != 0)
   4231 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4232 		break;
   4233 	default:
   4234 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4235 		    __func__);
   4236 		break;
   4237 	}
   4238 }
   4239 
   4240 int
   4241 wm_phy_post_reset(struct wm_softc *sc)
   4242 {
   4243 	device_t dev = sc->sc_dev;
   4244 	uint16_t reg;
   4245 	int rv = 0;
   4246 
   4247 	/* This function is only for ICH8 and newer. */
   4248 	if (sc->sc_type < WM_T_ICH8)
   4249 		return 0;
   4250 
   4251 	if (wm_phy_resetisblocked(sc)) {
   4252 		/* XXX */
   4253 		device_printf(dev, "PHY is blocked\n");
   4254 		return -1;
   4255 	}
   4256 
   4257 	/* Allow time for h/w to get to quiescent state after reset */
   4258 	delay(10*1000);
   4259 
   4260 	/* Perform any necessary post-reset workarounds */
   4261 	if (sc->sc_type == WM_T_PCH)
   4262 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4263 	else if (sc->sc_type == WM_T_PCH2)
   4264 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4265 	if (rv != 0)
   4266 		return rv;
   4267 
   4268 	/* Clear the host wakeup bit after lcd reset */
   4269 	if (sc->sc_type >= WM_T_PCH) {
   4270 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4271 		reg &= ~BM_WUC_HOST_WU_BIT;
   4272 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4273 	}
   4274 
   4275 	/* Configure the LCD with the extended configuration region in NVM */
   4276 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4277 		return rv;
   4278 
   4279 	/* Configure the LCD with the OEM bits in NVM */
   4280 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4281 
   4282 	if (sc->sc_type == WM_T_PCH2) {
   4283 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4284 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4285 			delay(10 * 1000);
   4286 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4287 		}
   4288 		/* Set EEE LPI Update Timer to 200usec */
   4289 		rv = sc->phy.acquire(sc);
   4290 		if (rv)
   4291 			return rv;
   4292 		rv = wm_write_emi_reg_locked(dev,
   4293 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4294 		sc->phy.release(sc);
   4295 	}
   4296 
   4297 	return rv;
   4298 }
   4299 
   4300 /* Only for PCH and newer */
   4301 static int
   4302 wm_write_smbus_addr(struct wm_softc *sc)
   4303 {
   4304 	uint32_t strap, freq;
   4305 	uint16_t phy_data;
   4306 	int rv;
   4307 
   4308 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4309 		device_xname(sc->sc_dev), __func__));
   4310 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4311 
   4312 	strap = CSR_READ(sc, WMREG_STRAP);
   4313 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4314 
   4315 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4316 	if (rv != 0)
   4317 		return -1;
   4318 
   4319 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4320 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4321 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4322 
   4323 	if (sc->sc_phytype == WMPHY_I217) {
   4324 		/* Restore SMBus frequency */
   4325 		if (freq --) {
   4326 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4327 			    | HV_SMB_ADDR_FREQ_HIGH);
   4328 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4329 			    HV_SMB_ADDR_FREQ_LOW);
   4330 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4331 			    HV_SMB_ADDR_FREQ_HIGH);
   4332 		} else
   4333 			DPRINTF(sc, WM_DEBUG_INIT,
   4334 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4335 				device_xname(sc->sc_dev), __func__));
   4336 	}
   4337 
   4338 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4339 	    phy_data);
   4340 }
   4341 
   4342 static int
   4343 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4344 {
   4345 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4346 	uint16_t phy_page = 0;
   4347 	int rv = 0;
   4348 
   4349 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4350 		device_xname(sc->sc_dev), __func__));
   4351 
   4352 	switch (sc->sc_type) {
   4353 	case WM_T_ICH8:
   4354 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4355 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4356 			return 0;
   4357 
   4358 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4359 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4360 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4361 			break;
   4362 		}
   4363 		/* FALLTHROUGH */
   4364 	case WM_T_PCH:
   4365 	case WM_T_PCH2:
   4366 	case WM_T_PCH_LPT:
   4367 	case WM_T_PCH_SPT:
   4368 	case WM_T_PCH_CNP:
   4369 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4370 		break;
   4371 	default:
   4372 		return 0;
   4373 	}
   4374 
   4375 	if ((rv = sc->phy.acquire(sc)) != 0)
   4376 		return rv;
   4377 
   4378 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4379 	if ((reg & sw_cfg_mask) == 0)
   4380 		goto release;
   4381 
   4382 	/*
   4383 	 * Make sure HW does not configure LCD from PHY extended configuration
   4384 	 * before SW configuration
   4385 	 */
   4386 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4387 	if ((sc->sc_type < WM_T_PCH2)
   4388 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4389 		goto release;
   4390 
   4391 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4392 		device_xname(sc->sc_dev), __func__));
   4393 	/* word_addr is in DWORD */
   4394 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4395 
   4396 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4397 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4398 	if (cnf_size == 0)
   4399 		goto release;
   4400 
   4401 	if (((sc->sc_type == WM_T_PCH)
   4402 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4403 	    || (sc->sc_type > WM_T_PCH)) {
   4404 		/*
   4405 		 * HW configures the SMBus address and LEDs when the OEM and
   4406 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4407 		 * are cleared, SW will configure them instead.
   4408 		 */
   4409 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4410 			device_xname(sc->sc_dev), __func__));
   4411 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4412 			goto release;
   4413 
   4414 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4415 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4416 		    (uint16_t)reg);
   4417 		if (rv != 0)
   4418 			goto release;
   4419 	}
   4420 
   4421 	/* Configure LCD from extended configuration region. */
   4422 	for (i = 0; i < cnf_size; i++) {
   4423 		uint16_t reg_data, reg_addr;
   4424 
   4425 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4426 			goto release;
   4427 
   4428 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4429 			goto release;
   4430 
   4431 		if (reg_addr == IGPHY_PAGE_SELECT)
   4432 			phy_page = reg_data;
   4433 
   4434 		reg_addr &= IGPHY_MAXREGADDR;
   4435 		reg_addr |= phy_page;
   4436 
   4437 		KASSERT(sc->phy.writereg_locked != NULL);
   4438 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4439 		    reg_data);
   4440 	}
   4441 
   4442 release:
   4443 	sc->phy.release(sc);
   4444 	return rv;
   4445 }
   4446 
   4447 /*
   4448  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4449  *  @sc:       pointer to the HW structure
   4450  *  @d0_state: boolean if entering d0 or d3 device state
   4451  *
   4452  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4453  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4454  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4455  */
   4456 int
   4457 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4458 {
   4459 	uint32_t mac_reg;
   4460 	uint16_t oem_reg;
   4461 	int rv;
   4462 
   4463 	if (sc->sc_type < WM_T_PCH)
   4464 		return 0;
   4465 
   4466 	rv = sc->phy.acquire(sc);
   4467 	if (rv != 0)
   4468 		return rv;
   4469 
   4470 	if (sc->sc_type == WM_T_PCH) {
   4471 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4472 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4473 			goto release;
   4474 	}
   4475 
   4476 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4477 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4478 		goto release;
   4479 
   4480 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4481 
   4482 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4483 	if (rv != 0)
   4484 		goto release;
   4485 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4486 
   4487 	if (d0_state) {
   4488 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4489 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4490 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4491 			oem_reg |= HV_OEM_BITS_LPLU;
   4492 	} else {
   4493 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4494 		    != 0)
   4495 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4496 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4497 		    != 0)
   4498 			oem_reg |= HV_OEM_BITS_LPLU;
   4499 	}
   4500 
   4501 	/* Set Restart auto-neg to activate the bits */
   4502 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4503 	    && (wm_phy_resetisblocked(sc) == false))
   4504 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4505 
   4506 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4507 
   4508 release:
   4509 	sc->phy.release(sc);
   4510 
   4511 	return rv;
   4512 }
   4513 
   4514 /* Init hardware bits */
   4515 void
   4516 wm_initialize_hardware_bits(struct wm_softc *sc)
   4517 {
   4518 	uint32_t tarc0, tarc1, reg;
   4519 
   4520 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4521 		device_xname(sc->sc_dev), __func__));
   4522 
   4523 	/* For 82571 variant, 80003 and ICHs */
   4524 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4525 	    || (sc->sc_type >= WM_T_80003)) {
   4526 
   4527 		/* Transmit Descriptor Control 0 */
   4528 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4529 		reg |= TXDCTL_COUNT_DESC;
   4530 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4531 
   4532 		/* Transmit Descriptor Control 1 */
   4533 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4534 		reg |= TXDCTL_COUNT_DESC;
   4535 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4536 
   4537 		/* TARC0 */
   4538 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4539 		switch (sc->sc_type) {
   4540 		case WM_T_82571:
   4541 		case WM_T_82572:
   4542 		case WM_T_82573:
   4543 		case WM_T_82574:
   4544 		case WM_T_82583:
   4545 		case WM_T_80003:
   4546 			/* Clear bits 30..27 */
   4547 			tarc0 &= ~__BITS(30, 27);
   4548 			break;
   4549 		default:
   4550 			break;
   4551 		}
   4552 
   4553 		switch (sc->sc_type) {
   4554 		case WM_T_82571:
   4555 		case WM_T_82572:
   4556 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4557 
   4558 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4559 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4560 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4561 			/* 8257[12] Errata No.7 */
   4562 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   4563 
   4564 			/* TARC1 bit 28 */
   4565 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4566 				tarc1 &= ~__BIT(28);
   4567 			else
   4568 				tarc1 |= __BIT(28);
   4569 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4570 
   4571 			/*
   4572 			 * 8257[12] Errata No.13
   4573 			 * Disable Dyamic Clock Gating.
   4574 			 */
   4575 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4576 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4577 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4578 			break;
   4579 		case WM_T_82573:
   4580 		case WM_T_82574:
   4581 		case WM_T_82583:
   4582 			if ((sc->sc_type == WM_T_82574)
   4583 			    || (sc->sc_type == WM_T_82583))
   4584 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4585 
   4586 			/* Extended Device Control */
   4587 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4588 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4589 			reg |= __BIT(22);	/* Set bit 22 */
   4590 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4591 
   4592 			/* Device Control */
   4593 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4594 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4595 
   4596 			/* PCIe Control Register */
   4597 			/*
   4598 			 * 82573 Errata (unknown).
   4599 			 *
   4600 			 * 82574 Errata 25 and 82583 Errata 12
   4601 			 * "Dropped Rx Packets":
   4602 			 *   NVM Image Version 2.1.4 and newer has no this bug.
   4603 			 */
   4604 			reg = CSR_READ(sc, WMREG_GCR);
   4605 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4606 			CSR_WRITE(sc, WMREG_GCR, reg);
   4607 
   4608 			if ((sc->sc_type == WM_T_82574)
   4609 			    || (sc->sc_type == WM_T_82583)) {
   4610 				/*
   4611 				 * Document says this bit must be set for
   4612 				 * proper operation.
   4613 				 */
   4614 				reg = CSR_READ(sc, WMREG_GCR);
   4615 				reg |= __BIT(22);
   4616 				CSR_WRITE(sc, WMREG_GCR, reg);
   4617 
   4618 				/*
   4619 				 * Apply workaround for hardware errata
   4620 				 * documented in errata docs Fixes issue where
   4621 				 * some error prone or unreliable PCIe
   4622 				 * completions are occurring, particularly
   4623 				 * with ASPM enabled. Without fix, issue can
   4624 				 * cause Tx timeouts.
   4625 				 */
   4626 				reg = CSR_READ(sc, WMREG_GCR2);
   4627 				reg |= __BIT(0);
   4628 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4629 			}
   4630 			break;
   4631 		case WM_T_80003:
   4632 			/* TARC0 */
   4633 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4634 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4635 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   4636 
   4637 			/* TARC1 bit 28 */
   4638 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4639 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4640 				tarc1 &= ~__BIT(28);
   4641 			else
   4642 				tarc1 |= __BIT(28);
   4643 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4644 			break;
   4645 		case WM_T_ICH8:
   4646 		case WM_T_ICH9:
   4647 		case WM_T_ICH10:
   4648 		case WM_T_PCH:
   4649 		case WM_T_PCH2:
   4650 		case WM_T_PCH_LPT:
   4651 		case WM_T_PCH_SPT:
   4652 		case WM_T_PCH_CNP:
   4653 			/* TARC0 */
   4654 			if (sc->sc_type == WM_T_ICH8) {
   4655 				/* Set TARC0 bits 29 and 28 */
   4656 				tarc0 |= __BITS(29, 28);
   4657 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4658 				tarc0 |= __BIT(29);
   4659 				/*
   4660 				 *  Drop bit 28. From Linux.
   4661 				 * See I218/I219 spec update
   4662 				 * "5. Buffer Overrun While the I219 is
   4663 				 * Processing DMA Transactions"
   4664 				 */
   4665 				tarc0 &= ~__BIT(28);
   4666 			}
   4667 			/* Set TARC0 bits 23,24,26,27 */
   4668 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4669 
   4670 			/* CTRL_EXT */
   4671 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4672 			reg |= __BIT(22);	/* Set bit 22 */
   4673 			/*
   4674 			 * Enable PHY low-power state when MAC is at D3
   4675 			 * w/o WoL
   4676 			 */
   4677 			if (sc->sc_type >= WM_T_PCH)
   4678 				reg |= CTRL_EXT_PHYPDEN;
   4679 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4680 
   4681 			/* TARC1 */
   4682 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4683 			/* bit 28 */
   4684 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4685 				tarc1 &= ~__BIT(28);
   4686 			else
   4687 				tarc1 |= __BIT(28);
   4688 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4689 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4690 
   4691 			/* Device Status */
   4692 			if (sc->sc_type == WM_T_ICH8) {
   4693 				reg = CSR_READ(sc, WMREG_STATUS);
   4694 				reg &= ~__BIT(31);
   4695 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4696 
   4697 			}
   4698 
   4699 			/* IOSFPC */
   4700 			if (sc->sc_type == WM_T_PCH_SPT) {
   4701 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4702 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4703 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4704 			}
   4705 			/*
   4706 			 * Work-around descriptor data corruption issue during
   4707 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4708 			 * capability.
   4709 			 */
   4710 			reg = CSR_READ(sc, WMREG_RFCTL);
   4711 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4712 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4713 			break;
   4714 		default:
   4715 			break;
   4716 		}
   4717 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4718 
   4719 		switch (sc->sc_type) {
   4720 		/*
   4721 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4722 		 * Avoid RSS Hash Value bug.
   4723 		 */
   4724 		case WM_T_82571:
   4725 		case WM_T_82572:
   4726 		case WM_T_82573:
   4727 		case WM_T_80003:
   4728 		case WM_T_ICH8:
   4729 			reg = CSR_READ(sc, WMREG_RFCTL);
   4730 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4731 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4732 			break;
   4733 		case WM_T_82574:
   4734 			/* Use extened Rx descriptor. */
   4735 			reg = CSR_READ(sc, WMREG_RFCTL);
   4736 			reg |= WMREG_RFCTL_EXSTEN;
   4737 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4738 			break;
   4739 		default:
   4740 			break;
   4741 		}
   4742 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4743 		/*
   4744 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4745 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4746 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4747 		 * Correctly by the Device"
   4748 		 *
   4749 		 * I354(C2000) Errata AVR53:
   4750 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4751 		 * Hang"
   4752 		 */
   4753 		reg = CSR_READ(sc, WMREG_RFCTL);
   4754 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4755 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4756 	}
   4757 }
   4758 
   4759 static uint32_t
   4760 wm_rxpbs_adjust_82580(uint32_t val)
   4761 {
   4762 	uint32_t rv = 0;
   4763 
   4764 	if (val < __arraycount(wm_82580_rxpbs_table))
   4765 		rv = wm_82580_rxpbs_table[val];
   4766 
   4767 	return rv;
   4768 }
   4769 
   4770 /*
   4771  * wm_reset_phy:
   4772  *
   4773  *	generic PHY reset function.
   4774  *	Same as e1000_phy_hw_reset_generic()
   4775  */
   4776 static int
   4777 wm_reset_phy(struct wm_softc *sc)
   4778 {
   4779 	uint32_t reg;
   4780 
   4781 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4782 		device_xname(sc->sc_dev), __func__));
   4783 	if (wm_phy_resetisblocked(sc))
   4784 		return -1;
   4785 
   4786 	sc->phy.acquire(sc);
   4787 
   4788 	reg = CSR_READ(sc, WMREG_CTRL);
   4789 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4790 	CSR_WRITE_FLUSH(sc);
   4791 
   4792 	delay(sc->phy.reset_delay_us);
   4793 
   4794 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4795 	CSR_WRITE_FLUSH(sc);
   4796 
   4797 	delay(150);
   4798 
   4799 	sc->phy.release(sc);
   4800 
   4801 	wm_get_cfg_done(sc);
   4802 	wm_phy_post_reset(sc);
   4803 
   4804 	return 0;
   4805 }
   4806 
   4807 /*
   4808  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4809  * so it is enough to check sc->sc_queue[0] only.
   4810  */
   4811 static void
   4812 wm_flush_desc_rings(struct wm_softc *sc)
   4813 {
   4814 	pcireg_t preg;
   4815 	uint32_t reg;
   4816 	struct wm_txqueue *txq;
   4817 	wiseman_txdesc_t *txd;
   4818 	int nexttx;
   4819 	uint32_t rctl;
   4820 
   4821 	/* First, disable MULR fix in FEXTNVM11 */
   4822 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4823 	reg |= FEXTNVM11_DIS_MULRFIX;
   4824 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4825 
   4826 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4827 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4828 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4829 		return;
   4830 
   4831 	/* TX */
   4832 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4833 	    preg, reg);
   4834 	reg = CSR_READ(sc, WMREG_TCTL);
   4835 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4836 
   4837 	txq = &sc->sc_queue[0].wmq_txq;
   4838 	nexttx = txq->txq_next;
   4839 	txd = &txq->txq_descs[nexttx];
   4840 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4841 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4842 	txd->wtx_fields.wtxu_status = 0;
   4843 	txd->wtx_fields.wtxu_options = 0;
   4844 	txd->wtx_fields.wtxu_vlan = 0;
   4845 
   4846 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4847 	    BUS_SPACE_BARRIER_WRITE);
   4848 
   4849 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4850 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4851 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4852 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4853 	delay(250);
   4854 
   4855 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4856 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4857 		return;
   4858 
   4859 	/* RX */
   4860 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4861 	rctl = CSR_READ(sc, WMREG_RCTL);
   4862 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4863 	CSR_WRITE_FLUSH(sc);
   4864 	delay(150);
   4865 
   4866 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4867 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4868 	reg &= 0xffffc000;
   4869 	/*
   4870 	 * Update thresholds: prefetch threshold to 31, host threshold
   4871 	 * to 1 and make sure the granularity is "descriptors" and not
   4872 	 * "cache lines"
   4873 	 */
   4874 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4875 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4876 
   4877 	/* Momentarily enable the RX ring for the changes to take effect */
   4878 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4879 	CSR_WRITE_FLUSH(sc);
   4880 	delay(150);
   4881 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4882 }
   4883 
   4884 /*
   4885  * wm_reset:
   4886  *
   4887  *	Reset the i82542 chip.
   4888  */
   4889 static void
   4890 wm_reset(struct wm_softc *sc)
   4891 {
   4892 	int phy_reset = 0;
   4893 	int i, error = 0;
   4894 	uint32_t reg;
   4895 	uint16_t kmreg;
   4896 	int rv;
   4897 
   4898 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4899 		device_xname(sc->sc_dev), __func__));
   4900 	KASSERT(sc->sc_type != 0);
   4901 
   4902 	/*
   4903 	 * Allocate on-chip memory according to the MTU size.
   4904 	 * The Packet Buffer Allocation register must be written
   4905 	 * before the chip is reset.
   4906 	 */
   4907 	switch (sc->sc_type) {
   4908 	case WM_T_82547:
   4909 	case WM_T_82547_2:
   4910 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4911 		    PBA_22K : PBA_30K;
   4912 		for (i = 0; i < sc->sc_nqueues; i++) {
   4913 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4914 			txq->txq_fifo_head = 0;
   4915 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4916 			txq->txq_fifo_size =
   4917 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4918 			txq->txq_fifo_stall = 0;
   4919 		}
   4920 		break;
   4921 	case WM_T_82571:
   4922 	case WM_T_82572:
   4923 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4924 	case WM_T_80003:
   4925 		sc->sc_pba = PBA_32K;
   4926 		break;
   4927 	case WM_T_82573:
   4928 		sc->sc_pba = PBA_12K;
   4929 		break;
   4930 	case WM_T_82574:
   4931 	case WM_T_82583:
   4932 		sc->sc_pba = PBA_20K;
   4933 		break;
   4934 	case WM_T_82576:
   4935 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4936 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4937 		break;
   4938 	case WM_T_82580:
   4939 	case WM_T_I350:
   4940 	case WM_T_I354:
   4941 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4942 		break;
   4943 	case WM_T_I210:
   4944 	case WM_T_I211:
   4945 		sc->sc_pba = PBA_34K;
   4946 		break;
   4947 	case WM_T_ICH8:
   4948 		/* Workaround for a bit corruption issue in FIFO memory */
   4949 		sc->sc_pba = PBA_8K;
   4950 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4951 		break;
   4952 	case WM_T_ICH9:
   4953 	case WM_T_ICH10:
   4954 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4955 		    PBA_14K : PBA_10K;
   4956 		break;
   4957 	case WM_T_PCH:
   4958 	case WM_T_PCH2:	/* XXX 14K? */
   4959 	case WM_T_PCH_LPT:
   4960 	case WM_T_PCH_SPT:
   4961 	case WM_T_PCH_CNP:
   4962 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   4963 		    PBA_12K : PBA_26K;
   4964 		break;
   4965 	default:
   4966 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4967 		    PBA_40K : PBA_48K;
   4968 		break;
   4969 	}
   4970 	/*
   4971 	 * Only old or non-multiqueue devices have the PBA register
   4972 	 * XXX Need special handling for 82575.
   4973 	 */
   4974 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4975 	    || (sc->sc_type == WM_T_82575))
   4976 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4977 
   4978 	/* Prevent the PCI-E bus from sticking */
   4979 	if (sc->sc_flags & WM_F_PCIE) {
   4980 		int timeout = 800;
   4981 
   4982 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4983 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4984 
   4985 		while (timeout--) {
   4986 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4987 			    == 0)
   4988 				break;
   4989 			delay(100);
   4990 		}
   4991 		if (timeout == 0)
   4992 			device_printf(sc->sc_dev,
   4993 			    "failed to disable busmastering\n");
   4994 	}
   4995 
   4996 	/* Set the completion timeout for interface */
   4997 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4998 	    || (sc->sc_type == WM_T_82580)
   4999 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5000 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5001 		wm_set_pcie_completion_timeout(sc);
   5002 
   5003 	/* Clear interrupt */
   5004 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5005 	if (wm_is_using_msix(sc)) {
   5006 		if (sc->sc_type != WM_T_82574) {
   5007 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5008 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5009 		} else
   5010 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5011 	}
   5012 
   5013 	/* Stop the transmit and receive processes. */
   5014 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5015 	sc->sc_rctl &= ~RCTL_EN;
   5016 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5017 	CSR_WRITE_FLUSH(sc);
   5018 
   5019 	/* XXX set_tbi_sbp_82543() */
   5020 
   5021 	delay(10*1000);
   5022 
   5023 	/* Must acquire the MDIO ownership before MAC reset */
   5024 	switch (sc->sc_type) {
   5025 	case WM_T_82573:
   5026 	case WM_T_82574:
   5027 	case WM_T_82583:
   5028 		error = wm_get_hw_semaphore_82573(sc);
   5029 		break;
   5030 	default:
   5031 		break;
   5032 	}
   5033 
   5034 	/*
   5035 	 * 82541 Errata 29? & 82547 Errata 28?
   5036 	 * See also the description about PHY_RST bit in CTRL register
   5037 	 * in 8254x_GBe_SDM.pdf.
   5038 	 */
   5039 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5040 		CSR_WRITE(sc, WMREG_CTRL,
   5041 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5042 		CSR_WRITE_FLUSH(sc);
   5043 		delay(5000);
   5044 	}
   5045 
   5046 	switch (sc->sc_type) {
   5047 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5048 	case WM_T_82541:
   5049 	case WM_T_82541_2:
   5050 	case WM_T_82547:
   5051 	case WM_T_82547_2:
   5052 		/*
   5053 		 * On some chipsets, a reset through a memory-mapped write
   5054 		 * cycle can cause the chip to reset before completing the
   5055 		 * write cycle. This causes major headache that can be avoided
   5056 		 * by issuing the reset via indirect register writes through
   5057 		 * I/O space.
   5058 		 *
   5059 		 * So, if we successfully mapped the I/O BAR at attach time,
   5060 		 * use that. Otherwise, try our luck with a memory-mapped
   5061 		 * reset.
   5062 		 */
   5063 		if (sc->sc_flags & WM_F_IOH_VALID)
   5064 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5065 		else
   5066 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5067 		break;
   5068 	case WM_T_82545_3:
   5069 	case WM_T_82546_3:
   5070 		/* Use the shadow control register on these chips. */
   5071 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5072 		break;
   5073 	case WM_T_80003:
   5074 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5075 		sc->phy.acquire(sc);
   5076 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5077 		sc->phy.release(sc);
   5078 		break;
   5079 	case WM_T_ICH8:
   5080 	case WM_T_ICH9:
   5081 	case WM_T_ICH10:
   5082 	case WM_T_PCH:
   5083 	case WM_T_PCH2:
   5084 	case WM_T_PCH_LPT:
   5085 	case WM_T_PCH_SPT:
   5086 	case WM_T_PCH_CNP:
   5087 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5088 		if (wm_phy_resetisblocked(sc) == false) {
   5089 			/*
   5090 			 * Gate automatic PHY configuration by hardware on
   5091 			 * non-managed 82579
   5092 			 */
   5093 			if ((sc->sc_type == WM_T_PCH2)
   5094 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5095 				== 0))
   5096 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5097 
   5098 			reg |= CTRL_PHY_RESET;
   5099 			phy_reset = 1;
   5100 		} else
   5101 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5102 		sc->phy.acquire(sc);
   5103 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5104 		/* Don't insert a completion barrier when reset */
   5105 		delay(20*1000);
   5106 		mutex_exit(sc->sc_ich_phymtx);
   5107 		break;
   5108 	case WM_T_82580:
   5109 	case WM_T_I350:
   5110 	case WM_T_I354:
   5111 	case WM_T_I210:
   5112 	case WM_T_I211:
   5113 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5114 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5115 			CSR_WRITE_FLUSH(sc);
   5116 		delay(5000);
   5117 		break;
   5118 	case WM_T_82542_2_0:
   5119 	case WM_T_82542_2_1:
   5120 	case WM_T_82543:
   5121 	case WM_T_82540:
   5122 	case WM_T_82545:
   5123 	case WM_T_82546:
   5124 	case WM_T_82571:
   5125 	case WM_T_82572:
   5126 	case WM_T_82573:
   5127 	case WM_T_82574:
   5128 	case WM_T_82575:
   5129 	case WM_T_82576:
   5130 	case WM_T_82583:
   5131 	default:
   5132 		/* Everything else can safely use the documented method. */
   5133 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5134 		break;
   5135 	}
   5136 
   5137 	/* Must release the MDIO ownership after MAC reset */
   5138 	switch (sc->sc_type) {
   5139 	case WM_T_82573:
   5140 	case WM_T_82574:
   5141 	case WM_T_82583:
   5142 		if (error == 0)
   5143 			wm_put_hw_semaphore_82573(sc);
   5144 		break;
   5145 	default:
   5146 		break;
   5147 	}
   5148 
   5149 	/* Set Phy Config Counter to 50msec */
   5150 	if (sc->sc_type == WM_T_PCH2) {
   5151 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5152 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5153 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5154 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5155 	}
   5156 
   5157 	if (phy_reset != 0)
   5158 		wm_get_cfg_done(sc);
   5159 
   5160 	/* Reload EEPROM */
   5161 	switch (sc->sc_type) {
   5162 	case WM_T_82542_2_0:
   5163 	case WM_T_82542_2_1:
   5164 	case WM_T_82543:
   5165 	case WM_T_82544:
   5166 		delay(10);
   5167 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5168 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5169 		CSR_WRITE_FLUSH(sc);
   5170 		delay(2000);
   5171 		break;
   5172 	case WM_T_82540:
   5173 	case WM_T_82545:
   5174 	case WM_T_82545_3:
   5175 	case WM_T_82546:
   5176 	case WM_T_82546_3:
   5177 		delay(5*1000);
   5178 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5179 		break;
   5180 	case WM_T_82541:
   5181 	case WM_T_82541_2:
   5182 	case WM_T_82547:
   5183 	case WM_T_82547_2:
   5184 		delay(20000);
   5185 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5186 		break;
   5187 	case WM_T_82571:
   5188 	case WM_T_82572:
   5189 	case WM_T_82573:
   5190 	case WM_T_82574:
   5191 	case WM_T_82583:
   5192 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5193 			delay(10);
   5194 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5195 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5196 			CSR_WRITE_FLUSH(sc);
   5197 		}
   5198 		/* check EECD_EE_AUTORD */
   5199 		wm_get_auto_rd_done(sc);
   5200 		/*
   5201 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5202 		 * is set.
   5203 		 */
   5204 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5205 		    || (sc->sc_type == WM_T_82583))
   5206 			delay(25*1000);
   5207 		break;
   5208 	case WM_T_82575:
   5209 	case WM_T_82576:
   5210 	case WM_T_82580:
   5211 	case WM_T_I350:
   5212 	case WM_T_I354:
   5213 	case WM_T_I210:
   5214 	case WM_T_I211:
   5215 	case WM_T_80003:
   5216 		/* check EECD_EE_AUTORD */
   5217 		wm_get_auto_rd_done(sc);
   5218 		break;
   5219 	case WM_T_ICH8:
   5220 	case WM_T_ICH9:
   5221 	case WM_T_ICH10:
   5222 	case WM_T_PCH:
   5223 	case WM_T_PCH2:
   5224 	case WM_T_PCH_LPT:
   5225 	case WM_T_PCH_SPT:
   5226 	case WM_T_PCH_CNP:
   5227 		break;
   5228 	default:
   5229 		panic("%s: unknown type\n", __func__);
   5230 	}
   5231 
   5232 	/* Check whether EEPROM is present or not */
   5233 	switch (sc->sc_type) {
   5234 	case WM_T_82575:
   5235 	case WM_T_82576:
   5236 	case WM_T_82580:
   5237 	case WM_T_I350:
   5238 	case WM_T_I354:
   5239 	case WM_T_ICH8:
   5240 	case WM_T_ICH9:
   5241 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5242 			/* Not found */
   5243 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5244 			if (sc->sc_type == WM_T_82575)
   5245 				wm_reset_init_script_82575(sc);
   5246 		}
   5247 		break;
   5248 	default:
   5249 		break;
   5250 	}
   5251 
   5252 	if (phy_reset != 0)
   5253 		wm_phy_post_reset(sc);
   5254 
   5255 	if ((sc->sc_type == WM_T_82580)
   5256 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5257 		/* Clear global device reset status bit */
   5258 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5259 	}
   5260 
   5261 	/* Clear any pending interrupt events. */
   5262 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5263 	reg = CSR_READ(sc, WMREG_ICR);
   5264 	if (wm_is_using_msix(sc)) {
   5265 		if (sc->sc_type != WM_T_82574) {
   5266 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5267 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5268 		} else
   5269 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5270 	}
   5271 
   5272 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5273 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5274 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5275 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5276 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5277 		reg |= KABGTXD_BGSQLBIAS;
   5278 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5279 	}
   5280 
   5281 	/* Reload sc_ctrl */
   5282 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5283 
   5284 	wm_set_eee(sc);
   5285 
   5286 	/*
   5287 	 * For PCH, this write will make sure that any noise will be detected
   5288 	 * as a CRC error and be dropped rather than show up as a bad packet
   5289 	 * to the DMA engine
   5290 	 */
   5291 	if (sc->sc_type == WM_T_PCH)
   5292 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5293 
   5294 	if (sc->sc_type >= WM_T_82544)
   5295 		CSR_WRITE(sc, WMREG_WUC, 0);
   5296 
   5297 	if (sc->sc_type < WM_T_82575)
   5298 		wm_disable_aspm(sc); /* Workaround for some chips */
   5299 
   5300 	wm_reset_mdicnfg_82580(sc);
   5301 
   5302 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5303 		wm_pll_workaround_i210(sc);
   5304 
   5305 	if (sc->sc_type == WM_T_80003) {
   5306 		/* Default to TRUE to enable the MDIC W/A */
   5307 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5308 
   5309 		rv = wm_kmrn_readreg(sc,
   5310 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5311 		if (rv == 0) {
   5312 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5313 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5314 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5315 			else
   5316 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5317 		}
   5318 	}
   5319 }
   5320 
   5321 /*
   5322  * wm_add_rxbuf:
   5323  *
   5324  *	Add a receive buffer to the indiciated descriptor.
   5325  */
   5326 static int
   5327 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5328 {
   5329 	struct wm_softc *sc = rxq->rxq_sc;
   5330 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5331 	struct mbuf *m;
   5332 	int error;
   5333 
   5334 	KASSERT(mutex_owned(rxq->rxq_lock));
   5335 
   5336 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5337 	if (m == NULL)
   5338 		return ENOBUFS;
   5339 
   5340 	MCLGET(m, M_DONTWAIT);
   5341 	if ((m->m_flags & M_EXT) == 0) {
   5342 		m_freem(m);
   5343 		return ENOBUFS;
   5344 	}
   5345 
   5346 	if (rxs->rxs_mbuf != NULL)
   5347 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5348 
   5349 	rxs->rxs_mbuf = m;
   5350 
   5351 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5352 	/*
   5353 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5354 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5355 	 */
   5356 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5357 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5358 	if (error) {
   5359 		/* XXX XXX XXX */
   5360 		aprint_error_dev(sc->sc_dev,
   5361 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5362 		panic("wm_add_rxbuf");
   5363 	}
   5364 
   5365 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5366 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5367 
   5368 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5369 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5370 			wm_init_rxdesc(rxq, idx);
   5371 	} else
   5372 		wm_init_rxdesc(rxq, idx);
   5373 
   5374 	return 0;
   5375 }
   5376 
   5377 /*
   5378  * wm_rxdrain:
   5379  *
   5380  *	Drain the receive queue.
   5381  */
   5382 static void
   5383 wm_rxdrain(struct wm_rxqueue *rxq)
   5384 {
   5385 	struct wm_softc *sc = rxq->rxq_sc;
   5386 	struct wm_rxsoft *rxs;
   5387 	int i;
   5388 
   5389 	KASSERT(mutex_owned(rxq->rxq_lock));
   5390 
   5391 	for (i = 0; i < WM_NRXDESC; i++) {
   5392 		rxs = &rxq->rxq_soft[i];
   5393 		if (rxs->rxs_mbuf != NULL) {
   5394 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5395 			m_freem(rxs->rxs_mbuf);
   5396 			rxs->rxs_mbuf = NULL;
   5397 		}
   5398 	}
   5399 }
   5400 
   5401 /*
   5402  * Setup registers for RSS.
   5403  *
   5404  * XXX not yet VMDq support
   5405  */
   5406 static void
   5407 wm_init_rss(struct wm_softc *sc)
   5408 {
   5409 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5410 	int i;
   5411 
   5412 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5413 
   5414 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5415 		unsigned int qid, reta_ent;
   5416 
   5417 		qid  = i % sc->sc_nqueues;
   5418 		switch (sc->sc_type) {
   5419 		case WM_T_82574:
   5420 			reta_ent = __SHIFTIN(qid,
   5421 			    RETA_ENT_QINDEX_MASK_82574);
   5422 			break;
   5423 		case WM_T_82575:
   5424 			reta_ent = __SHIFTIN(qid,
   5425 			    RETA_ENT_QINDEX1_MASK_82575);
   5426 			break;
   5427 		default:
   5428 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5429 			break;
   5430 		}
   5431 
   5432 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5433 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5434 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5435 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5436 	}
   5437 
   5438 	rss_getkey((uint8_t *)rss_key);
   5439 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5440 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5441 
   5442 	if (sc->sc_type == WM_T_82574)
   5443 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5444 	else
   5445 		mrqc = MRQC_ENABLE_RSS_MQ;
   5446 
   5447 	/*
   5448 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5449 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5450 	 */
   5451 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5452 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5453 #if 0
   5454 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5455 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5456 #endif
   5457 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5458 
   5459 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5460 }
   5461 
   5462 /*
   5463  * Adjust TX and RX queue numbers which the system actulally uses.
   5464  *
   5465  * The numbers are affected by below parameters.
   5466  *     - The nubmer of hardware queues
   5467  *     - The number of MSI-X vectors (= "nvectors" argument)
   5468  *     - ncpu
   5469  */
   5470 static void
   5471 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5472 {
   5473 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5474 
   5475 	if (nvectors < 2) {
   5476 		sc->sc_nqueues = 1;
   5477 		return;
   5478 	}
   5479 
   5480 	switch (sc->sc_type) {
   5481 	case WM_T_82572:
   5482 		hw_ntxqueues = 2;
   5483 		hw_nrxqueues = 2;
   5484 		break;
   5485 	case WM_T_82574:
   5486 		hw_ntxqueues = 2;
   5487 		hw_nrxqueues = 2;
   5488 		break;
   5489 	case WM_T_82575:
   5490 		hw_ntxqueues = 4;
   5491 		hw_nrxqueues = 4;
   5492 		break;
   5493 	case WM_T_82576:
   5494 		hw_ntxqueues = 16;
   5495 		hw_nrxqueues = 16;
   5496 		break;
   5497 	case WM_T_82580:
   5498 	case WM_T_I350:
   5499 	case WM_T_I354:
   5500 		hw_ntxqueues = 8;
   5501 		hw_nrxqueues = 8;
   5502 		break;
   5503 	case WM_T_I210:
   5504 		hw_ntxqueues = 4;
   5505 		hw_nrxqueues = 4;
   5506 		break;
   5507 	case WM_T_I211:
   5508 		hw_ntxqueues = 2;
   5509 		hw_nrxqueues = 2;
   5510 		break;
   5511 		/*
   5512 		 * As below ethernet controllers does not support MSI-X,
   5513 		 * this driver let them not use multiqueue.
   5514 		 *     - WM_T_80003
   5515 		 *     - WM_T_ICH8
   5516 		 *     - WM_T_ICH9
   5517 		 *     - WM_T_ICH10
   5518 		 *     - WM_T_PCH
   5519 		 *     - WM_T_PCH2
   5520 		 *     - WM_T_PCH_LPT
   5521 		 */
   5522 	default:
   5523 		hw_ntxqueues = 1;
   5524 		hw_nrxqueues = 1;
   5525 		break;
   5526 	}
   5527 
   5528 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5529 
   5530 	/*
   5531 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
   5532 	 * the number of queues used actually.
   5533 	 */
   5534 	if (nvectors < hw_nqueues + 1)
   5535 		sc->sc_nqueues = nvectors - 1;
   5536 	else
   5537 		sc->sc_nqueues = hw_nqueues;
   5538 
   5539 	/*
   5540 	 * As queues more then cpus cannot improve scaling, we limit
   5541 	 * the number of queues used actually.
   5542 	 */
   5543 	if (ncpu < sc->sc_nqueues)
   5544 		sc->sc_nqueues = ncpu;
   5545 }
   5546 
   5547 static inline bool
   5548 wm_is_using_msix(struct wm_softc *sc)
   5549 {
   5550 
   5551 	return (sc->sc_nintrs > 1);
   5552 }
   5553 
   5554 static inline bool
   5555 wm_is_using_multiqueue(struct wm_softc *sc)
   5556 {
   5557 
   5558 	return (sc->sc_nqueues > 1);
   5559 }
   5560 
   5561 static int
   5562 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5563 {
   5564 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5565 
   5566 	wmq->wmq_id = qidx;
   5567 	wmq->wmq_intr_idx = intr_idx;
   5568 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5569 	    wm_handle_queue, wmq);
   5570 	if (wmq->wmq_si != NULL)
   5571 		return 0;
   5572 
   5573 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5574 	    wmq->wmq_id);
   5575 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5576 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5577 	return ENOMEM;
   5578 }
   5579 
   5580 /*
   5581  * Both single interrupt MSI and INTx can use this function.
   5582  */
   5583 static int
   5584 wm_setup_legacy(struct wm_softc *sc)
   5585 {
   5586 	pci_chipset_tag_t pc = sc->sc_pc;
   5587 	const char *intrstr = NULL;
   5588 	char intrbuf[PCI_INTRSTR_LEN];
   5589 	int error;
   5590 
   5591 	error = wm_alloc_txrx_queues(sc);
   5592 	if (error) {
   5593 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5594 		    error);
   5595 		return ENOMEM;
   5596 	}
   5597 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5598 	    sizeof(intrbuf));
   5599 #ifdef WM_MPSAFE
   5600 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5601 #endif
   5602 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5603 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5604 	if (sc->sc_ihs[0] == NULL) {
   5605 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5606 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5607 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5608 		return ENOMEM;
   5609 	}
   5610 
   5611 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5612 	sc->sc_nintrs = 1;
   5613 
   5614 	return wm_softint_establish_queue(sc, 0, 0);
   5615 }
   5616 
   5617 static int
   5618 wm_setup_msix(struct wm_softc *sc)
   5619 {
   5620 	void *vih;
   5621 	kcpuset_t *affinity;
   5622 	int qidx, error, intr_idx, txrx_established;
   5623 	pci_chipset_tag_t pc = sc->sc_pc;
   5624 	const char *intrstr = NULL;
   5625 	char intrbuf[PCI_INTRSTR_LEN];
   5626 	char intr_xname[INTRDEVNAMEBUF];
   5627 
   5628 	if (sc->sc_nqueues < ncpu) {
   5629 		/*
   5630 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   5631 		 * interrupts start from CPU#1.
   5632 		 */
   5633 		sc->sc_affinity_offset = 1;
   5634 	} else {
   5635 		/*
   5636 		 * In this case, this device use all CPUs. So, we unify
   5637 		 * affinitied cpu_index to msix vector number for readability.
   5638 		 */
   5639 		sc->sc_affinity_offset = 0;
   5640 	}
   5641 
   5642 	error = wm_alloc_txrx_queues(sc);
   5643 	if (error) {
   5644 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5645 		    error);
   5646 		return ENOMEM;
   5647 	}
   5648 
   5649 	kcpuset_create(&affinity, false);
   5650 	intr_idx = 0;
   5651 
   5652 	/*
   5653 	 * TX and RX
   5654 	 */
   5655 	txrx_established = 0;
   5656 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5657 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5658 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5659 
   5660 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5661 		    sizeof(intrbuf));
   5662 #ifdef WM_MPSAFE
   5663 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5664 		    PCI_INTR_MPSAFE, true);
   5665 #endif
   5666 		memset(intr_xname, 0, sizeof(intr_xname));
   5667 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5668 		    device_xname(sc->sc_dev), qidx);
   5669 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5670 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5671 		if (vih == NULL) {
   5672 			aprint_error_dev(sc->sc_dev,
   5673 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5674 			    intrstr ? " at " : "",
   5675 			    intrstr ? intrstr : "");
   5676 
   5677 			goto fail;
   5678 		}
   5679 		kcpuset_zero(affinity);
   5680 		/* Round-robin affinity */
   5681 		kcpuset_set(affinity, affinity_to);
   5682 		error = interrupt_distribute(vih, affinity, NULL);
   5683 		if (error == 0) {
   5684 			aprint_normal_dev(sc->sc_dev,
   5685 			    "for TX and RX interrupting at %s affinity to %u\n",
   5686 			    intrstr, affinity_to);
   5687 		} else {
   5688 			aprint_normal_dev(sc->sc_dev,
   5689 			    "for TX and RX interrupting at %s\n", intrstr);
   5690 		}
   5691 		sc->sc_ihs[intr_idx] = vih;
   5692 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5693 			goto fail;
   5694 		txrx_established++;
   5695 		intr_idx++;
   5696 	}
   5697 
   5698 	/* LINK */
   5699 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5700 	    sizeof(intrbuf));
   5701 #ifdef WM_MPSAFE
   5702 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5703 #endif
   5704 	memset(intr_xname, 0, sizeof(intr_xname));
   5705 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5706 	    device_xname(sc->sc_dev));
   5707 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5708 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5709 	if (vih == NULL) {
   5710 		aprint_error_dev(sc->sc_dev,
   5711 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5712 		    intrstr ? " at " : "",
   5713 		    intrstr ? intrstr : "");
   5714 
   5715 		goto fail;
   5716 	}
   5717 	/* Keep default affinity to LINK interrupt */
   5718 	aprint_normal_dev(sc->sc_dev,
   5719 	    "for LINK interrupting at %s\n", intrstr);
   5720 	sc->sc_ihs[intr_idx] = vih;
   5721 	sc->sc_link_intr_idx = intr_idx;
   5722 
   5723 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5724 	kcpuset_destroy(affinity);
   5725 	return 0;
   5726 
   5727  fail:
   5728 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5729 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5730 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5731 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5732 	}
   5733 
   5734 	kcpuset_destroy(affinity);
   5735 	return ENOMEM;
   5736 }
   5737 
   5738 static void
   5739 wm_unset_stopping_flags(struct wm_softc *sc)
   5740 {
   5741 	int i;
   5742 
   5743 	KASSERT(WM_CORE_LOCKED(sc));
   5744 
   5745 	/* Must unset stopping flags in ascending order. */
   5746 	for (i = 0; i < sc->sc_nqueues; i++) {
   5747 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5748 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5749 
   5750 		mutex_enter(txq->txq_lock);
   5751 		txq->txq_stopping = false;
   5752 		mutex_exit(txq->txq_lock);
   5753 
   5754 		mutex_enter(rxq->rxq_lock);
   5755 		rxq->rxq_stopping = false;
   5756 		mutex_exit(rxq->rxq_lock);
   5757 	}
   5758 
   5759 	sc->sc_core_stopping = false;
   5760 }
   5761 
   5762 static void
   5763 wm_set_stopping_flags(struct wm_softc *sc)
   5764 {
   5765 	int i;
   5766 
   5767 	KASSERT(WM_CORE_LOCKED(sc));
   5768 
   5769 	sc->sc_core_stopping = true;
   5770 
   5771 	/* Must set stopping flags in ascending order. */
   5772 	for (i = 0; i < sc->sc_nqueues; i++) {
   5773 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5774 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5775 
   5776 		mutex_enter(rxq->rxq_lock);
   5777 		rxq->rxq_stopping = true;
   5778 		mutex_exit(rxq->rxq_lock);
   5779 
   5780 		mutex_enter(txq->txq_lock);
   5781 		txq->txq_stopping = true;
   5782 		mutex_exit(txq->txq_lock);
   5783 	}
   5784 }
   5785 
   5786 /*
   5787  * Write interrupt interval value to ITR or EITR
   5788  */
   5789 static void
   5790 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5791 {
   5792 
   5793 	if (!wmq->wmq_set_itr)
   5794 		return;
   5795 
   5796 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5797 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5798 
   5799 		/*
   5800 		 * 82575 doesn't have CNT_INGR field.
   5801 		 * So, overwrite counter field by software.
   5802 		 */
   5803 		if (sc->sc_type == WM_T_82575)
   5804 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5805 		else
   5806 			eitr |= EITR_CNT_INGR;
   5807 
   5808 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5809 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5810 		/*
   5811 		 * 82574 has both ITR and EITR. SET EITR when we use
   5812 		 * the multi queue function with MSI-X.
   5813 		 */
   5814 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5815 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5816 	} else {
   5817 		KASSERT(wmq->wmq_id == 0);
   5818 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5819 	}
   5820 
   5821 	wmq->wmq_set_itr = false;
   5822 }
   5823 
   5824 /*
   5825  * TODO
   5826  * Below dynamic calculation of itr is almost the same as linux igb,
   5827  * however it does not fit to wm(4). So, we will have been disable AIM
   5828  * until we will find appropriate calculation of itr.
   5829  */
   5830 /*
   5831  * calculate interrupt interval value to be going to write register in
   5832  * wm_itrs_writereg(). This function does not write ITR/EITR register.
   5833  */
   5834 static void
   5835 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5836 {
   5837 #ifdef NOTYET
   5838 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5839 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5840 	uint32_t avg_size = 0;
   5841 	uint32_t new_itr;
   5842 
   5843 	if (rxq->rxq_packets)
   5844 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5845 	if (txq->txq_packets)
   5846 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5847 
   5848 	if (avg_size == 0) {
   5849 		new_itr = 450; /* restore default value */
   5850 		goto out;
   5851 	}
   5852 
   5853 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5854 	avg_size += 24;
   5855 
   5856 	/* Don't starve jumbo frames */
   5857 	avg_size = uimin(avg_size, 3000);
   5858 
   5859 	/* Give a little boost to mid-size frames */
   5860 	if ((avg_size > 300) && (avg_size < 1200))
   5861 		new_itr = avg_size / 3;
   5862 	else
   5863 		new_itr = avg_size / 2;
   5864 
   5865 out:
   5866 	/*
   5867 	 * The usage of 82574 and 82575 EITR is different from otther NEWQUEUE
   5868 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5869 	 */
   5870 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5871 		new_itr *= 4;
   5872 
   5873 	if (new_itr != wmq->wmq_itr) {
   5874 		wmq->wmq_itr = new_itr;
   5875 		wmq->wmq_set_itr = true;
   5876 	} else
   5877 		wmq->wmq_set_itr = false;
   5878 
   5879 	rxq->rxq_packets = 0;
   5880 	rxq->rxq_bytes = 0;
   5881 	txq->txq_packets = 0;
   5882 	txq->txq_bytes = 0;
   5883 #endif
   5884 }
   5885 
   5886 static void
   5887 wm_init_sysctls(struct wm_softc *sc)
   5888 {
   5889 	struct sysctllog **log;
   5890 	const struct sysctlnode *rnode, *qnode, *cnode;
   5891 	int i, rv;
   5892 	const char *dvname;
   5893 
   5894 	log = &sc->sc_sysctllog;
   5895 	dvname = device_xname(sc->sc_dev);
   5896 
   5897 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5898 	    0, CTLTYPE_NODE, dvname,
   5899 	    SYSCTL_DESCR("wm information and settings"),
   5900 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5901 	if (rv != 0)
   5902 		goto err;
   5903 
   5904 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5905 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5906 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5907 	if (rv != 0)
   5908 		goto teardown;
   5909 
   5910 	for (i = 0; i < sc->sc_nqueues; i++) {
   5911 		struct wm_queue *wmq = &sc->sc_queue[i];
   5912 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5913 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5914 
   5915 		snprintf(sc->sc_queue[i].sysctlname,
   5916 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   5917 
   5918 		if (sysctl_createv(log, 0, &rnode, &qnode,
   5919 		    0, CTLTYPE_NODE,
   5920 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   5921 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5922 			break;
   5923 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5924 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5925 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   5926 		    NULL, 0, &txq->txq_free,
   5927 		    0, CTL_CREATE, CTL_EOL) != 0)
   5928 			break;
   5929 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5930 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5931 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   5932 		    NULL, 0, &txq->txq_next,
   5933 		    0, CTL_CREATE, CTL_EOL) != 0)
   5934 			break;
   5935 
   5936 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5937 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5938 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   5939 		    NULL, 0, &rxq->rxq_ptr,
   5940 		    0, CTL_CREATE, CTL_EOL) != 0)
   5941 			break;
   5942 	}
   5943 
   5944 #ifdef WM_DEBUG
   5945 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5946 	    CTLTYPE_INT, "debug_flags",
   5947 	    SYSCTL_DESCR(
   5948 		    "Debug flags:\n"	\
   5949 		    "\t0x01 LINK\n"	\
   5950 		    "\t0x02 TX\n"	\
   5951 		    "\t0x04 RX\n"	\
   5952 		    "\t0x08 GMII\n"	\
   5953 		    "\t0x10 MANAGE\n"	\
   5954 		    "\t0x20 NVM\n"	\
   5955 		    "\t0x40 INIT\n"	\
   5956 		    "\t0x80 LOCK"),
   5957 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   5958 	if (rv != 0)
   5959 		goto teardown;
   5960 #endif
   5961 
   5962 	return;
   5963 
   5964 teardown:
   5965 	sysctl_teardown(log);
   5966 err:
   5967 	sc->sc_sysctllog = NULL;
   5968 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5969 	    __func__, rv);
   5970 }
   5971 
   5972 /*
   5973  * wm_init:		[ifnet interface function]
   5974  *
   5975  *	Initialize the interface.
   5976  */
   5977 static int
   5978 wm_init(struct ifnet *ifp)
   5979 {
   5980 	struct wm_softc *sc = ifp->if_softc;
   5981 	int ret;
   5982 
   5983 	WM_CORE_LOCK(sc);
   5984 	ret = wm_init_locked(ifp);
   5985 	WM_CORE_UNLOCK(sc);
   5986 
   5987 	return ret;
   5988 }
   5989 
   5990 static int
   5991 wm_init_locked(struct ifnet *ifp)
   5992 {
   5993 	struct wm_softc *sc = ifp->if_softc;
   5994 	struct ethercom *ec = &sc->sc_ethercom;
   5995 	int i, j, trynum, error = 0;
   5996 	uint32_t reg, sfp_mask = 0;
   5997 
   5998 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   5999 		device_xname(sc->sc_dev), __func__));
   6000 	KASSERT(WM_CORE_LOCKED(sc));
   6001 
   6002 	/*
   6003 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
   6004 	 * There is a small but measurable benefit to avoiding the adjusment
   6005 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6006 	 * on such platforms.  One possibility is that the DMA itself is
   6007 	 * slightly more efficient if the front of the entire packet (instead
   6008 	 * of the front of the headers) is aligned.
   6009 	 *
   6010 	 * Note we must always set align_tweak to 0 if we are using
   6011 	 * jumbo frames.
   6012 	 */
   6013 #ifdef __NO_STRICT_ALIGNMENT
   6014 	sc->sc_align_tweak = 0;
   6015 #else
   6016 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6017 		sc->sc_align_tweak = 0;
   6018 	else
   6019 		sc->sc_align_tweak = 2;
   6020 #endif /* __NO_STRICT_ALIGNMENT */
   6021 
   6022 	/* Cancel any pending I/O. */
   6023 	wm_stop_locked(ifp, false, false);
   6024 
   6025 	/* Update statistics before reset */
   6026 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6027 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6028 
   6029 	/* PCH_SPT hardware workaround */
   6030 	if (sc->sc_type == WM_T_PCH_SPT)
   6031 		wm_flush_desc_rings(sc);
   6032 
   6033 	/* Reset the chip to a known state. */
   6034 	wm_reset(sc);
   6035 
   6036 	/*
   6037 	 * AMT based hardware can now take control from firmware
   6038 	 * Do this after reset.
   6039 	 */
   6040 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6041 		wm_get_hw_control(sc);
   6042 
   6043 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6044 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6045 		wm_legacy_irq_quirk_spt(sc);
   6046 
   6047 	/* Init hardware bits */
   6048 	wm_initialize_hardware_bits(sc);
   6049 
   6050 	/* Reset the PHY. */
   6051 	if (sc->sc_flags & WM_F_HAS_MII)
   6052 		wm_gmii_reset(sc);
   6053 
   6054 	if (sc->sc_type >= WM_T_ICH8) {
   6055 		reg = CSR_READ(sc, WMREG_GCR);
   6056 		/*
   6057 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6058 		 * default after reset.
   6059 		 */
   6060 		if (sc->sc_type == WM_T_ICH8)
   6061 			reg |= GCR_NO_SNOOP_ALL;
   6062 		else
   6063 			reg &= ~GCR_NO_SNOOP_ALL;
   6064 		CSR_WRITE(sc, WMREG_GCR, reg);
   6065 	}
   6066 
   6067 	if ((sc->sc_type >= WM_T_ICH8)
   6068 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6069 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6070 
   6071 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6072 		reg |= CTRL_EXT_RO_DIS;
   6073 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6074 	}
   6075 
   6076 	/* Calculate (E)ITR value */
   6077 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6078 		/*
   6079 		 * For NEWQUEUE's EITR (except for 82575).
   6080 		 * 82575's EITR should be set same throttling value as other
   6081 		 * old controllers' ITR because the interrupt/sec calculation
   6082 		 * is the same, that is, 1,000,000,000 / (N * 256).
   6083 		 *
   6084 		 * 82574's EITR should be set same throttling value as ITR.
   6085 		 *
   6086 		 * For N interrupts/sec, set this value to:
   6087 		 * 1,000,000 / N in contrast to ITR throttoling value.
   6088 		 */
   6089 		sc->sc_itr_init = 450;
   6090 	} else if (sc->sc_type >= WM_T_82543) {
   6091 		/*
   6092 		 * Set up the interrupt throttling register (units of 256ns)
   6093 		 * Note that a footnote in Intel's documentation says this
   6094 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6095 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6096 		 * that that is also true for the 1024ns units of the other
   6097 		 * interrupt-related timer registers -- so, really, we ought
   6098 		 * to divide this value by 4 when the link speed is low.
   6099 		 *
   6100 		 * XXX implement this division at link speed change!
   6101 		 */
   6102 
   6103 		/*
   6104 		 * For N interrupts/sec, set this value to:
   6105 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6106 		 * absolute and packet timer values to this value
   6107 		 * divided by 4 to get "simple timer" behavior.
   6108 		 */
   6109 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6110 	}
   6111 
   6112 	error = wm_init_txrx_queues(sc);
   6113 	if (error)
   6114 		goto out;
   6115 
   6116 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6117 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6118 	    (sc->sc_type >= WM_T_82575))
   6119 		wm_serdes_power_up_link_82575(sc);
   6120 
   6121 	/* Clear out the VLAN table -- we don't use it (yet). */
   6122 	CSR_WRITE(sc, WMREG_VET, 0);
   6123 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6124 		trynum = 10; /* Due to hw errata */
   6125 	else
   6126 		trynum = 1;
   6127 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6128 		for (j = 0; j < trynum; j++)
   6129 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6130 
   6131 	/*
   6132 	 * Set up flow-control parameters.
   6133 	 *
   6134 	 * XXX Values could probably stand some tuning.
   6135 	 */
   6136 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6137 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6138 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6139 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6140 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6141 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6142 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6143 	}
   6144 
   6145 	sc->sc_fcrtl = FCRTL_DFLT;
   6146 	if (sc->sc_type < WM_T_82543) {
   6147 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6148 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6149 	} else {
   6150 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6151 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6152 	}
   6153 
   6154 	if (sc->sc_type == WM_T_80003)
   6155 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6156 	else
   6157 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6158 
   6159 	/* Writes the control register. */
   6160 	wm_set_vlan(sc);
   6161 
   6162 	if (sc->sc_flags & WM_F_HAS_MII) {
   6163 		uint16_t kmreg;
   6164 
   6165 		switch (sc->sc_type) {
   6166 		case WM_T_80003:
   6167 		case WM_T_ICH8:
   6168 		case WM_T_ICH9:
   6169 		case WM_T_ICH10:
   6170 		case WM_T_PCH:
   6171 		case WM_T_PCH2:
   6172 		case WM_T_PCH_LPT:
   6173 		case WM_T_PCH_SPT:
   6174 		case WM_T_PCH_CNP:
   6175 			/*
   6176 			 * Set the mac to wait the maximum time between each
   6177 			 * iteration and increase the max iterations when
   6178 			 * polling the phy; this fixes erroneous timeouts at
   6179 			 * 10Mbps.
   6180 			 */
   6181 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6182 			    0xFFFF);
   6183 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6184 			    &kmreg);
   6185 			kmreg |= 0x3F;
   6186 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6187 			    kmreg);
   6188 			break;
   6189 		default:
   6190 			break;
   6191 		}
   6192 
   6193 		if (sc->sc_type == WM_T_80003) {
   6194 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6195 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6196 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6197 
   6198 			/* Bypass RX and TX FIFO's */
   6199 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6200 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6201 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6202 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6203 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6204 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6205 		}
   6206 	}
   6207 #if 0
   6208 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6209 #endif
   6210 
   6211 	/* Set up checksum offload parameters. */
   6212 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6213 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6214 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6215 		reg |= RXCSUM_IPOFL;
   6216 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6217 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6218 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6219 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6220 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6221 
   6222 	/* Set registers about MSI-X */
   6223 	if (wm_is_using_msix(sc)) {
   6224 		uint32_t ivar, qintr_idx;
   6225 		struct wm_queue *wmq;
   6226 		unsigned int qid;
   6227 
   6228 		if (sc->sc_type == WM_T_82575) {
   6229 			/* Interrupt control */
   6230 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6231 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6232 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6233 
   6234 			/* TX and RX */
   6235 			for (i = 0; i < sc->sc_nqueues; i++) {
   6236 				wmq = &sc->sc_queue[i];
   6237 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6238 				    EITR_TX_QUEUE(wmq->wmq_id)
   6239 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6240 			}
   6241 			/* Link status */
   6242 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6243 			    EITR_OTHER);
   6244 		} else if (sc->sc_type == WM_T_82574) {
   6245 			/* Interrupt control */
   6246 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6247 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6248 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6249 
   6250 			/*
   6251 			 * Workaround issue with spurious interrupts
   6252 			 * in MSI-X mode.
   6253 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
   6254 			 * initialized yet. So re-initialize WMREG_RFCTL here.
   6255 			 */
   6256 			reg = CSR_READ(sc, WMREG_RFCTL);
   6257 			reg |= WMREG_RFCTL_ACKDIS;
   6258 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6259 
   6260 			ivar = 0;
   6261 			/* TX and RX */
   6262 			for (i = 0; i < sc->sc_nqueues; i++) {
   6263 				wmq = &sc->sc_queue[i];
   6264 				qid = wmq->wmq_id;
   6265 				qintr_idx = wmq->wmq_intr_idx;
   6266 
   6267 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6268 				    IVAR_TX_MASK_Q_82574(qid));
   6269 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6270 				    IVAR_RX_MASK_Q_82574(qid));
   6271 			}
   6272 			/* Link status */
   6273 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6274 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6275 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6276 		} else {
   6277 			/* Interrupt control */
   6278 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6279 			    | GPIE_EIAME | GPIE_PBA);
   6280 
   6281 			switch (sc->sc_type) {
   6282 			case WM_T_82580:
   6283 			case WM_T_I350:
   6284 			case WM_T_I354:
   6285 			case WM_T_I210:
   6286 			case WM_T_I211:
   6287 				/* TX and RX */
   6288 				for (i = 0; i < sc->sc_nqueues; i++) {
   6289 					wmq = &sc->sc_queue[i];
   6290 					qid = wmq->wmq_id;
   6291 					qintr_idx = wmq->wmq_intr_idx;
   6292 
   6293 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6294 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6295 					ivar |= __SHIFTIN((qintr_idx
   6296 						| IVAR_VALID),
   6297 					    IVAR_TX_MASK_Q(qid));
   6298 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6299 					ivar |= __SHIFTIN((qintr_idx
   6300 						| IVAR_VALID),
   6301 					    IVAR_RX_MASK_Q(qid));
   6302 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6303 				}
   6304 				break;
   6305 			case WM_T_82576:
   6306 				/* TX and RX */
   6307 				for (i = 0; i < sc->sc_nqueues; i++) {
   6308 					wmq = &sc->sc_queue[i];
   6309 					qid = wmq->wmq_id;
   6310 					qintr_idx = wmq->wmq_intr_idx;
   6311 
   6312 					ivar = CSR_READ(sc,
   6313 					    WMREG_IVAR_Q_82576(qid));
   6314 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6315 					ivar |= __SHIFTIN((qintr_idx
   6316 						| IVAR_VALID),
   6317 					    IVAR_TX_MASK_Q_82576(qid));
   6318 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6319 					ivar |= __SHIFTIN((qintr_idx
   6320 						| IVAR_VALID),
   6321 					    IVAR_RX_MASK_Q_82576(qid));
   6322 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6323 					    ivar);
   6324 				}
   6325 				break;
   6326 			default:
   6327 				break;
   6328 			}
   6329 
   6330 			/* Link status */
   6331 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6332 			    IVAR_MISC_OTHER);
   6333 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6334 		}
   6335 
   6336 		if (wm_is_using_multiqueue(sc)) {
   6337 			wm_init_rss(sc);
   6338 
   6339 			/*
   6340 			** NOTE: Receive Full-Packet Checksum Offload
   6341 			** is mutually exclusive with Multiqueue. However
   6342 			** this is not the same as TCP/IP checksums which
   6343 			** still work.
   6344 			*/
   6345 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6346 			reg |= RXCSUM_PCSD;
   6347 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6348 		}
   6349 	}
   6350 
   6351 	/* Set up the interrupt registers. */
   6352 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6353 
   6354 	/* Enable SFP module insertion interrupt if it's required */
   6355 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6356 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6357 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6358 		sfp_mask = ICR_GPI(0);
   6359 	}
   6360 
   6361 	if (wm_is_using_msix(sc)) {
   6362 		uint32_t mask;
   6363 		struct wm_queue *wmq;
   6364 
   6365 		switch (sc->sc_type) {
   6366 		case WM_T_82574:
   6367 			mask = 0;
   6368 			for (i = 0; i < sc->sc_nqueues; i++) {
   6369 				wmq = &sc->sc_queue[i];
   6370 				mask |= ICR_TXQ(wmq->wmq_id);
   6371 				mask |= ICR_RXQ(wmq->wmq_id);
   6372 			}
   6373 			mask |= ICR_OTHER;
   6374 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6375 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6376 			break;
   6377 		default:
   6378 			if (sc->sc_type == WM_T_82575) {
   6379 				mask = 0;
   6380 				for (i = 0; i < sc->sc_nqueues; i++) {
   6381 					wmq = &sc->sc_queue[i];
   6382 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6383 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6384 				}
   6385 				mask |= EITR_OTHER;
   6386 			} else {
   6387 				mask = 0;
   6388 				for (i = 0; i < sc->sc_nqueues; i++) {
   6389 					wmq = &sc->sc_queue[i];
   6390 					mask |= 1 << wmq->wmq_intr_idx;
   6391 				}
   6392 				mask |= 1 << sc->sc_link_intr_idx;
   6393 			}
   6394 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6395 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6396 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6397 
   6398 			/* For other interrupts */
   6399 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6400 			break;
   6401 		}
   6402 	} else {
   6403 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6404 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6405 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6406 	}
   6407 
   6408 	/* Set up the inter-packet gap. */
   6409 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6410 
   6411 	if (sc->sc_type >= WM_T_82543) {
   6412 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6413 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6414 			wm_itrs_writereg(sc, wmq);
   6415 		}
   6416 		/*
   6417 		 * Link interrupts occur much less than TX
   6418 		 * interrupts and RX interrupts. So, we don't
   6419 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   6420 		 * FreeBSD's if_igb.
   6421 		 */
   6422 	}
   6423 
   6424 	/* Set the VLAN ethernetype. */
   6425 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6426 
   6427 	/*
   6428 	 * Set up the transmit control register; we start out with
   6429 	 * a collision distance suitable for FDX, but update it whe
   6430 	 * we resolve the media type.
   6431 	 */
   6432 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6433 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6434 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6435 	if (sc->sc_type >= WM_T_82571)
   6436 		sc->sc_tctl |= TCTL_MULR;
   6437 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6438 
   6439 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6440 		/* Write TDT after TCTL.EN is set. See the document. */
   6441 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6442 	}
   6443 
   6444 	if (sc->sc_type == WM_T_80003) {
   6445 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6446 		reg &= ~TCTL_EXT_GCEX_MASK;
   6447 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6448 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6449 	}
   6450 
   6451 	/* Set the media. */
   6452 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6453 		goto out;
   6454 
   6455 	/* Configure for OS presence */
   6456 	wm_init_manageability(sc);
   6457 
   6458 	/*
   6459 	 * Set up the receive control register; we actually program the
   6460 	 * register when we set the receive filter. Use multicast address
   6461 	 * offset type 0.
   6462 	 *
   6463 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6464 	 * don't enable that feature.
   6465 	 */
   6466 	sc->sc_mchash_type = 0;
   6467 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6468 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6469 
   6470 	/* 82574 use one buffer extended Rx descriptor. */
   6471 	if (sc->sc_type == WM_T_82574)
   6472 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6473 
   6474 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6475 		sc->sc_rctl |= RCTL_SECRC;
   6476 
   6477 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6478 	    && (ifp->if_mtu > ETHERMTU)) {
   6479 		sc->sc_rctl |= RCTL_LPE;
   6480 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6481 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6482 	}
   6483 
   6484 	if (MCLBYTES == 2048)
   6485 		sc->sc_rctl |= RCTL_2k;
   6486 	else {
   6487 		if (sc->sc_type >= WM_T_82543) {
   6488 			switch (MCLBYTES) {
   6489 			case 4096:
   6490 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6491 				break;
   6492 			case 8192:
   6493 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6494 				break;
   6495 			case 16384:
   6496 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6497 				break;
   6498 			default:
   6499 				panic("wm_init: MCLBYTES %d unsupported",
   6500 				    MCLBYTES);
   6501 				break;
   6502 			}
   6503 		} else
   6504 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6505 	}
   6506 
   6507 	/* Enable ECC */
   6508 	switch (sc->sc_type) {
   6509 	case WM_T_82571:
   6510 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6511 		reg |= PBA_ECC_CORR_EN;
   6512 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6513 		break;
   6514 	case WM_T_PCH_LPT:
   6515 	case WM_T_PCH_SPT:
   6516 	case WM_T_PCH_CNP:
   6517 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6518 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6519 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6520 
   6521 		sc->sc_ctrl |= CTRL_MEHE;
   6522 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6523 		break;
   6524 	default:
   6525 		break;
   6526 	}
   6527 
   6528 	/*
   6529 	 * Set the receive filter.
   6530 	 *
   6531 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6532 	 * the setting of RCTL.EN in wm_set_filter()
   6533 	 */
   6534 	wm_set_filter(sc);
   6535 
   6536 	/* On 575 and later set RDT only if RX enabled */
   6537 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6538 		int qidx;
   6539 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6540 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6541 			for (i = 0; i < WM_NRXDESC; i++) {
   6542 				mutex_enter(rxq->rxq_lock);
   6543 				wm_init_rxdesc(rxq, i);
   6544 				mutex_exit(rxq->rxq_lock);
   6545 
   6546 			}
   6547 		}
   6548 	}
   6549 
   6550 	wm_unset_stopping_flags(sc);
   6551 
   6552 	/* Start the one second link check clock. */
   6553 	callout_schedule(&sc->sc_tick_ch, hz);
   6554 
   6555 	/* ...all done! */
   6556 	ifp->if_flags |= IFF_RUNNING;
   6557 
   6558  out:
   6559 	/* Save last flags for the callback */
   6560 	sc->sc_if_flags = ifp->if_flags;
   6561 	sc->sc_ec_capenable = ec->ec_capenable;
   6562 	if (error)
   6563 		log(LOG_ERR, "%s: interface not running\n",
   6564 		    device_xname(sc->sc_dev));
   6565 	return error;
   6566 }
   6567 
   6568 /*
   6569  * wm_stop:		[ifnet interface function]
   6570  *
   6571  *	Stop transmission on the interface.
   6572  */
   6573 static void
   6574 wm_stop(struct ifnet *ifp, int disable)
   6575 {
   6576 	struct wm_softc *sc = ifp->if_softc;
   6577 
   6578 	ASSERT_SLEEPABLE();
   6579 
   6580 	WM_CORE_LOCK(sc);
   6581 	wm_stop_locked(ifp, disable ? true : false, true);
   6582 	WM_CORE_UNLOCK(sc);
   6583 
   6584 	/*
   6585 	 * After wm_set_stopping_flags(), it is guaranteed
   6586 	 * wm_handle_queue_work() does not call workqueue_enqueue().
   6587 	 * However, workqueue_wait() cannot call in wm_stop_locked()
   6588 	 * because it can sleep...
   6589 	 * so, call workqueue_wait() here.
   6590 	 */
   6591 	for (int i = 0; i < sc->sc_nqueues; i++)
   6592 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6593 }
   6594 
   6595 static void
   6596 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6597 {
   6598 	struct wm_softc *sc = ifp->if_softc;
   6599 	struct wm_txsoft *txs;
   6600 	int i, qidx;
   6601 
   6602 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6603 		device_xname(sc->sc_dev), __func__));
   6604 	KASSERT(WM_CORE_LOCKED(sc));
   6605 
   6606 	wm_set_stopping_flags(sc);
   6607 
   6608 	if (sc->sc_flags & WM_F_HAS_MII) {
   6609 		/* Down the MII. */
   6610 		mii_down(&sc->sc_mii);
   6611 	} else {
   6612 #if 0
   6613 		/* Should we clear PHY's status properly? */
   6614 		wm_reset(sc);
   6615 #endif
   6616 	}
   6617 
   6618 	/* Stop the transmit and receive processes. */
   6619 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6620 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6621 	sc->sc_rctl &= ~RCTL_EN;
   6622 
   6623 	/*
   6624 	 * Clear the interrupt mask to ensure the device cannot assert its
   6625 	 * interrupt line.
   6626 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6627 	 * service any currently pending or shared interrupt.
   6628 	 */
   6629 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6630 	sc->sc_icr = 0;
   6631 	if (wm_is_using_msix(sc)) {
   6632 		if (sc->sc_type != WM_T_82574) {
   6633 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6634 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6635 		} else
   6636 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6637 	}
   6638 
   6639 	/*
   6640 	 * Stop callouts after interrupts are disabled; if we have
   6641 	 * to wait for them, we will be releasing the CORE_LOCK
   6642 	 * briefly, which will unblock interrupts on the current CPU.
   6643 	 */
   6644 
   6645 	/* Stop the one second clock. */
   6646 	if (wait)
   6647 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6648 	else
   6649 		callout_stop(&sc->sc_tick_ch);
   6650 
   6651 	/* Stop the 82547 Tx FIFO stall check timer. */
   6652 	if (sc->sc_type == WM_T_82547) {
   6653 		if (wait)
   6654 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6655 		else
   6656 			callout_stop(&sc->sc_txfifo_ch);
   6657 	}
   6658 
   6659 	/* Release any queued transmit buffers. */
   6660 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6661 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6662 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6663 		struct mbuf *m;
   6664 
   6665 		mutex_enter(txq->txq_lock);
   6666 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6667 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6668 			txs = &txq->txq_soft[i];
   6669 			if (txs->txs_mbuf != NULL) {
   6670 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6671 				m_freem(txs->txs_mbuf);
   6672 				txs->txs_mbuf = NULL;
   6673 			}
   6674 		}
   6675 		/* Drain txq_interq */
   6676 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6677 			m_freem(m);
   6678 		mutex_exit(txq->txq_lock);
   6679 	}
   6680 
   6681 	/* Mark the interface as down and cancel the watchdog timer. */
   6682 	ifp->if_flags &= ~IFF_RUNNING;
   6683 
   6684 	if (disable) {
   6685 		for (i = 0; i < sc->sc_nqueues; i++) {
   6686 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6687 			mutex_enter(rxq->rxq_lock);
   6688 			wm_rxdrain(rxq);
   6689 			mutex_exit(rxq->rxq_lock);
   6690 		}
   6691 	}
   6692 
   6693 #if 0 /* notyet */
   6694 	if (sc->sc_type >= WM_T_82544)
   6695 		CSR_WRITE(sc, WMREG_WUC, 0);
   6696 #endif
   6697 }
   6698 
   6699 static void
   6700 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6701 {
   6702 	struct mbuf *m;
   6703 	int i;
   6704 
   6705 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6706 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6707 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6708 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6709 		    m->m_data, m->m_len, m->m_flags);
   6710 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6711 	    i, i == 1 ? "" : "s");
   6712 }
   6713 
   6714 /*
   6715  * wm_82547_txfifo_stall:
   6716  *
   6717  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6718  *	reset the FIFO pointers, and restart packet transmission.
   6719  */
   6720 static void
   6721 wm_82547_txfifo_stall(void *arg)
   6722 {
   6723 	struct wm_softc *sc = arg;
   6724 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6725 
   6726 	mutex_enter(txq->txq_lock);
   6727 
   6728 	if (txq->txq_stopping)
   6729 		goto out;
   6730 
   6731 	if (txq->txq_fifo_stall) {
   6732 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6733 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6734 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6735 			/*
   6736 			 * Packets have drained.  Stop transmitter, reset
   6737 			 * FIFO pointers, restart transmitter, and kick
   6738 			 * the packet queue.
   6739 			 */
   6740 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6741 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6742 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6743 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6744 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6745 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6746 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6747 			CSR_WRITE_FLUSH(sc);
   6748 
   6749 			txq->txq_fifo_head = 0;
   6750 			txq->txq_fifo_stall = 0;
   6751 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6752 		} else {
   6753 			/*
   6754 			 * Still waiting for packets to drain; try again in
   6755 			 * another tick.
   6756 			 */
   6757 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6758 		}
   6759 	}
   6760 
   6761 out:
   6762 	mutex_exit(txq->txq_lock);
   6763 }
   6764 
   6765 /*
   6766  * wm_82547_txfifo_bugchk:
   6767  *
   6768  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6769  *	prevent enqueueing a packet that would wrap around the end
   6770  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   6771  *
   6772  *	We do this by checking the amount of space before the end
   6773  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6774  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6775  *	the internal FIFO pointers to the beginning, and restart
   6776  *	transmission on the interface.
   6777  */
   6778 #define	WM_FIFO_HDR		0x10
   6779 #define	WM_82547_PAD_LEN	0x3e0
   6780 static int
   6781 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6782 {
   6783 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6784 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6785 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6786 
   6787 	/* Just return if already stalled. */
   6788 	if (txq->txq_fifo_stall)
   6789 		return 1;
   6790 
   6791 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6792 		/* Stall only occurs in half-duplex mode. */
   6793 		goto send_packet;
   6794 	}
   6795 
   6796 	if (len >= WM_82547_PAD_LEN + space) {
   6797 		txq->txq_fifo_stall = 1;
   6798 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6799 		return 1;
   6800 	}
   6801 
   6802  send_packet:
   6803 	txq->txq_fifo_head += len;
   6804 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6805 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6806 
   6807 	return 0;
   6808 }
   6809 
   6810 static int
   6811 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6812 {
   6813 	int error;
   6814 
   6815 	/*
   6816 	 * Allocate the control data structures, and create and load the
   6817 	 * DMA map for it.
   6818 	 *
   6819 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6820 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6821 	 * both sets within the same 4G segment.
   6822 	 */
   6823 	if (sc->sc_type < WM_T_82544)
   6824 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6825 	else
   6826 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6827 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6828 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6829 	else
   6830 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6831 
   6832 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6833 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6834 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6835 		aprint_error_dev(sc->sc_dev,
   6836 		    "unable to allocate TX control data, error = %d\n",
   6837 		    error);
   6838 		goto fail_0;
   6839 	}
   6840 
   6841 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6842 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6843 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6844 		aprint_error_dev(sc->sc_dev,
   6845 		    "unable to map TX control data, error = %d\n", error);
   6846 		goto fail_1;
   6847 	}
   6848 
   6849 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6850 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6851 		aprint_error_dev(sc->sc_dev,
   6852 		    "unable to create TX control data DMA map, error = %d\n",
   6853 		    error);
   6854 		goto fail_2;
   6855 	}
   6856 
   6857 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6858 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6859 		aprint_error_dev(sc->sc_dev,
   6860 		    "unable to load TX control data DMA map, error = %d\n",
   6861 		    error);
   6862 		goto fail_3;
   6863 	}
   6864 
   6865 	return 0;
   6866 
   6867  fail_3:
   6868 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6869  fail_2:
   6870 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6871 	    WM_TXDESCS_SIZE(txq));
   6872  fail_1:
   6873 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6874  fail_0:
   6875 	return error;
   6876 }
   6877 
   6878 static void
   6879 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6880 {
   6881 
   6882 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6883 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6884 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6885 	    WM_TXDESCS_SIZE(txq));
   6886 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6887 }
   6888 
   6889 static int
   6890 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6891 {
   6892 	int error;
   6893 	size_t rxq_descs_size;
   6894 
   6895 	/*
   6896 	 * Allocate the control data structures, and create and load the
   6897 	 * DMA map for it.
   6898 	 *
   6899 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6900 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6901 	 * both sets within the same 4G segment.
   6902 	 */
   6903 	rxq->rxq_ndesc = WM_NRXDESC;
   6904 	if (sc->sc_type == WM_T_82574)
   6905 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6906 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6907 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6908 	else
   6909 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6910 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6911 
   6912 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6913 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6914 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6915 		aprint_error_dev(sc->sc_dev,
   6916 		    "unable to allocate RX control data, error = %d\n",
   6917 		    error);
   6918 		goto fail_0;
   6919 	}
   6920 
   6921 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6922 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6923 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6924 		aprint_error_dev(sc->sc_dev,
   6925 		    "unable to map RX control data, error = %d\n", error);
   6926 		goto fail_1;
   6927 	}
   6928 
   6929 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6930 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6931 		aprint_error_dev(sc->sc_dev,
   6932 		    "unable to create RX control data DMA map, error = %d\n",
   6933 		    error);
   6934 		goto fail_2;
   6935 	}
   6936 
   6937 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6938 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6939 		aprint_error_dev(sc->sc_dev,
   6940 		    "unable to load RX control data DMA map, error = %d\n",
   6941 		    error);
   6942 		goto fail_3;
   6943 	}
   6944 
   6945 	return 0;
   6946 
   6947  fail_3:
   6948 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6949  fail_2:
   6950 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6951 	    rxq_descs_size);
   6952  fail_1:
   6953 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6954  fail_0:
   6955 	return error;
   6956 }
   6957 
   6958 static void
   6959 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6960 {
   6961 
   6962 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6963 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6964 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6965 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6966 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6967 }
   6968 
   6969 
   6970 static int
   6971 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6972 {
   6973 	int i, error;
   6974 
   6975 	/* Create the transmit buffer DMA maps. */
   6976 	WM_TXQUEUELEN(txq) =
   6977 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6978 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6979 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6980 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6981 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6982 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6983 			aprint_error_dev(sc->sc_dev,
   6984 			    "unable to create Tx DMA map %d, error = %d\n",
   6985 			    i, error);
   6986 			goto fail;
   6987 		}
   6988 	}
   6989 
   6990 	return 0;
   6991 
   6992  fail:
   6993 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6994 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6995 			bus_dmamap_destroy(sc->sc_dmat,
   6996 			    txq->txq_soft[i].txs_dmamap);
   6997 	}
   6998 	return error;
   6999 }
   7000 
   7001 static void
   7002 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7003 {
   7004 	int i;
   7005 
   7006 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7007 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7008 			bus_dmamap_destroy(sc->sc_dmat,
   7009 			    txq->txq_soft[i].txs_dmamap);
   7010 	}
   7011 }
   7012 
   7013 static int
   7014 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7015 {
   7016 	int i, error;
   7017 
   7018 	/* Create the receive buffer DMA maps. */
   7019 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7020 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7021 			    MCLBYTES, 0, 0,
   7022 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7023 			aprint_error_dev(sc->sc_dev,
   7024 			    "unable to create Rx DMA map %d error = %d\n",
   7025 			    i, error);
   7026 			goto fail;
   7027 		}
   7028 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7029 	}
   7030 
   7031 	return 0;
   7032 
   7033  fail:
   7034 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7035 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7036 			bus_dmamap_destroy(sc->sc_dmat,
   7037 			    rxq->rxq_soft[i].rxs_dmamap);
   7038 	}
   7039 	return error;
   7040 }
   7041 
   7042 static void
   7043 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7044 {
   7045 	int i;
   7046 
   7047 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7048 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7049 			bus_dmamap_destroy(sc->sc_dmat,
   7050 			    rxq->rxq_soft[i].rxs_dmamap);
   7051 	}
   7052 }
   7053 
   7054 /*
   7055  * wm_alloc_quques:
   7056  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7057  */
   7058 static int
   7059 wm_alloc_txrx_queues(struct wm_softc *sc)
   7060 {
   7061 	int i, error, tx_done, rx_done;
   7062 
   7063 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7064 	    KM_SLEEP);
   7065 	if (sc->sc_queue == NULL) {
   7066 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   7067 		error = ENOMEM;
   7068 		goto fail_0;
   7069 	}
   7070 
   7071 	/* For transmission */
   7072 	error = 0;
   7073 	tx_done = 0;
   7074 	for (i = 0; i < sc->sc_nqueues; i++) {
   7075 #ifdef WM_EVENT_COUNTERS
   7076 		int j;
   7077 		const char *xname;
   7078 #endif
   7079 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7080 		txq->txq_sc = sc;
   7081 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7082 
   7083 		error = wm_alloc_tx_descs(sc, txq);
   7084 		if (error)
   7085 			break;
   7086 		error = wm_alloc_tx_buffer(sc, txq);
   7087 		if (error) {
   7088 			wm_free_tx_descs(sc, txq);
   7089 			break;
   7090 		}
   7091 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7092 		if (txq->txq_interq == NULL) {
   7093 			wm_free_tx_descs(sc, txq);
   7094 			wm_free_tx_buffer(sc, txq);
   7095 			error = ENOMEM;
   7096 			break;
   7097 		}
   7098 
   7099 #ifdef WM_EVENT_COUNTERS
   7100 		xname = device_xname(sc->sc_dev);
   7101 
   7102 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7103 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7104 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7105 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7106 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7107 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7108 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7109 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7110 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7111 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7112 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7113 
   7114 		for (j = 0; j < WM_NTXSEGS; j++) {
   7115 			snprintf(txq->txq_txseg_evcnt_names[j],
   7116 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   7117 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   7118 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7119 		}
   7120 
   7121 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7122 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7123 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7124 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7125 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7126 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7127 #endif /* WM_EVENT_COUNTERS */
   7128 
   7129 		tx_done++;
   7130 	}
   7131 	if (error)
   7132 		goto fail_1;
   7133 
   7134 	/* For receive */
   7135 	error = 0;
   7136 	rx_done = 0;
   7137 	for (i = 0; i < sc->sc_nqueues; i++) {
   7138 #ifdef WM_EVENT_COUNTERS
   7139 		const char *xname;
   7140 #endif
   7141 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7142 		rxq->rxq_sc = sc;
   7143 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7144 
   7145 		error = wm_alloc_rx_descs(sc, rxq);
   7146 		if (error)
   7147 			break;
   7148 
   7149 		error = wm_alloc_rx_buffer(sc, rxq);
   7150 		if (error) {
   7151 			wm_free_rx_descs(sc, rxq);
   7152 			break;
   7153 		}
   7154 
   7155 #ifdef WM_EVENT_COUNTERS
   7156 		xname = device_xname(sc->sc_dev);
   7157 
   7158 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7159 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7160 
   7161 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7162 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7163 #endif /* WM_EVENT_COUNTERS */
   7164 
   7165 		rx_done++;
   7166 	}
   7167 	if (error)
   7168 		goto fail_2;
   7169 
   7170 	return 0;
   7171 
   7172  fail_2:
   7173 	for (i = 0; i < rx_done; i++) {
   7174 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7175 		wm_free_rx_buffer(sc, rxq);
   7176 		wm_free_rx_descs(sc, rxq);
   7177 		if (rxq->rxq_lock)
   7178 			mutex_obj_free(rxq->rxq_lock);
   7179 	}
   7180  fail_1:
   7181 	for (i = 0; i < tx_done; i++) {
   7182 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7183 		pcq_destroy(txq->txq_interq);
   7184 		wm_free_tx_buffer(sc, txq);
   7185 		wm_free_tx_descs(sc, txq);
   7186 		if (txq->txq_lock)
   7187 			mutex_obj_free(txq->txq_lock);
   7188 	}
   7189 
   7190 	kmem_free(sc->sc_queue,
   7191 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7192  fail_0:
   7193 	return error;
   7194 }
   7195 
   7196 /*
   7197  * wm_free_quques:
   7198  *	Free {tx,rx}descs and {tx,rx} buffers
   7199  */
   7200 static void
   7201 wm_free_txrx_queues(struct wm_softc *sc)
   7202 {
   7203 	int i;
   7204 
   7205 	for (i = 0; i < sc->sc_nqueues; i++) {
   7206 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7207 
   7208 #ifdef WM_EVENT_COUNTERS
   7209 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7210 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7211 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7212 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7213 #endif /* WM_EVENT_COUNTERS */
   7214 
   7215 		wm_free_rx_buffer(sc, rxq);
   7216 		wm_free_rx_descs(sc, rxq);
   7217 		if (rxq->rxq_lock)
   7218 			mutex_obj_free(rxq->rxq_lock);
   7219 	}
   7220 
   7221 	for (i = 0; i < sc->sc_nqueues; i++) {
   7222 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7223 		struct mbuf *m;
   7224 #ifdef WM_EVENT_COUNTERS
   7225 		int j;
   7226 
   7227 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7228 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7229 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7230 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7231 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7232 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7233 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7234 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7235 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7236 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7237 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7238 
   7239 		for (j = 0; j < WM_NTXSEGS; j++)
   7240 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7241 
   7242 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7243 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7244 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7245 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7246 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7247 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7248 #endif /* WM_EVENT_COUNTERS */
   7249 
   7250 		/* Drain txq_interq */
   7251 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7252 			m_freem(m);
   7253 		pcq_destroy(txq->txq_interq);
   7254 
   7255 		wm_free_tx_buffer(sc, txq);
   7256 		wm_free_tx_descs(sc, txq);
   7257 		if (txq->txq_lock)
   7258 			mutex_obj_free(txq->txq_lock);
   7259 	}
   7260 
   7261 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7262 }
   7263 
   7264 static void
   7265 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7266 {
   7267 
   7268 	KASSERT(mutex_owned(txq->txq_lock));
   7269 
   7270 	/* Initialize the transmit descriptor ring. */
   7271 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7272 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7273 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7274 	txq->txq_free = WM_NTXDESC(txq);
   7275 	txq->txq_next = 0;
   7276 }
   7277 
   7278 static void
   7279 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7280     struct wm_txqueue *txq)
   7281 {
   7282 
   7283 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7284 		device_xname(sc->sc_dev), __func__));
   7285 	KASSERT(mutex_owned(txq->txq_lock));
   7286 
   7287 	if (sc->sc_type < WM_T_82543) {
   7288 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7289 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7290 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7291 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7292 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7293 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7294 	} else {
   7295 		int qid = wmq->wmq_id;
   7296 
   7297 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7298 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7299 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7300 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7301 
   7302 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7303 			/*
   7304 			 * Don't write TDT before TCTL.EN is set.
   7305 			 * See the document.
   7306 			 */
   7307 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7308 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7309 			    | TXDCTL_WTHRESH(0));
   7310 		else {
   7311 			/* XXX should update with AIM? */
   7312 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7313 			if (sc->sc_type >= WM_T_82540) {
   7314 				/* Should be the same */
   7315 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7316 			}
   7317 
   7318 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7319 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7320 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7321 		}
   7322 	}
   7323 }
   7324 
   7325 static void
   7326 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7327 {
   7328 	int i;
   7329 
   7330 	KASSERT(mutex_owned(txq->txq_lock));
   7331 
   7332 	/* Initialize the transmit job descriptors. */
   7333 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7334 		txq->txq_soft[i].txs_mbuf = NULL;
   7335 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7336 	txq->txq_snext = 0;
   7337 	txq->txq_sdirty = 0;
   7338 }
   7339 
   7340 static void
   7341 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7342     struct wm_txqueue *txq)
   7343 {
   7344 
   7345 	KASSERT(mutex_owned(txq->txq_lock));
   7346 
   7347 	/*
   7348 	 * Set up some register offsets that are different between
   7349 	 * the i82542 and the i82543 and later chips.
   7350 	 */
   7351 	if (sc->sc_type < WM_T_82543)
   7352 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7353 	else
   7354 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7355 
   7356 	wm_init_tx_descs(sc, txq);
   7357 	wm_init_tx_regs(sc, wmq, txq);
   7358 	wm_init_tx_buffer(sc, txq);
   7359 
   7360 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7361 	txq->txq_sending = false;
   7362 }
   7363 
   7364 static void
   7365 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7366     struct wm_rxqueue *rxq)
   7367 {
   7368 
   7369 	KASSERT(mutex_owned(rxq->rxq_lock));
   7370 
   7371 	/*
   7372 	 * Initialize the receive descriptor and receive job
   7373 	 * descriptor rings.
   7374 	 */
   7375 	if (sc->sc_type < WM_T_82543) {
   7376 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7377 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7378 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7379 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7380 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7381 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7382 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7383 
   7384 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7385 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7386 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7387 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7388 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7389 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7390 	} else {
   7391 		int qid = wmq->wmq_id;
   7392 
   7393 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7394 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7395 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7396 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7397 
   7398 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7399 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7400 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7401 
   7402 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   7403 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7404 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7405 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7406 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7407 			    | RXDCTL_WTHRESH(1));
   7408 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7409 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7410 		} else {
   7411 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7412 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7413 			/* XXX should update with AIM? */
   7414 			CSR_WRITE(sc, WMREG_RDTR,
   7415 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7416 			/* MUST be same */
   7417 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7418 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7419 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7420 		}
   7421 	}
   7422 }
   7423 
   7424 static int
   7425 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7426 {
   7427 	struct wm_rxsoft *rxs;
   7428 	int error, i;
   7429 
   7430 	KASSERT(mutex_owned(rxq->rxq_lock));
   7431 
   7432 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7433 		rxs = &rxq->rxq_soft[i];
   7434 		if (rxs->rxs_mbuf == NULL) {
   7435 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7436 				log(LOG_ERR, "%s: unable to allocate or map "
   7437 				    "rx buffer %d, error = %d\n",
   7438 				    device_xname(sc->sc_dev), i, error);
   7439 				/*
   7440 				 * XXX Should attempt to run with fewer receive
   7441 				 * XXX buffers instead of just failing.
   7442 				 */
   7443 				wm_rxdrain(rxq);
   7444 				return ENOMEM;
   7445 			}
   7446 		} else {
   7447 			/*
   7448 			 * For 82575 and 82576, the RX descriptors must be
   7449 			 * initialized after the setting of RCTL.EN in
   7450 			 * wm_set_filter()
   7451 			 */
   7452 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7453 				wm_init_rxdesc(rxq, i);
   7454 		}
   7455 	}
   7456 	rxq->rxq_ptr = 0;
   7457 	rxq->rxq_discard = 0;
   7458 	WM_RXCHAIN_RESET(rxq);
   7459 
   7460 	return 0;
   7461 }
   7462 
   7463 static int
   7464 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7465     struct wm_rxqueue *rxq)
   7466 {
   7467 
   7468 	KASSERT(mutex_owned(rxq->rxq_lock));
   7469 
   7470 	/*
   7471 	 * Set up some register offsets that are different between
   7472 	 * the i82542 and the i82543 and later chips.
   7473 	 */
   7474 	if (sc->sc_type < WM_T_82543)
   7475 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7476 	else
   7477 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7478 
   7479 	wm_init_rx_regs(sc, wmq, rxq);
   7480 	return wm_init_rx_buffer(sc, rxq);
   7481 }
   7482 
   7483 /*
   7484  * wm_init_quques:
   7485  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7486  */
   7487 static int
   7488 wm_init_txrx_queues(struct wm_softc *sc)
   7489 {
   7490 	int i, error = 0;
   7491 
   7492 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7493 		device_xname(sc->sc_dev), __func__));
   7494 
   7495 	for (i = 0; i < sc->sc_nqueues; i++) {
   7496 		struct wm_queue *wmq = &sc->sc_queue[i];
   7497 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7498 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7499 
   7500 		/*
   7501 		 * TODO
   7502 		 * Currently, use constant variable instead of AIM.
   7503 		 * Furthermore, the interrupt interval of multiqueue which use
   7504 		 * polling mode is less than default value.
   7505 		 * More tuning and AIM are required.
   7506 		 */
   7507 		if (wm_is_using_multiqueue(sc))
   7508 			wmq->wmq_itr = 50;
   7509 		else
   7510 			wmq->wmq_itr = sc->sc_itr_init;
   7511 		wmq->wmq_set_itr = true;
   7512 
   7513 		mutex_enter(txq->txq_lock);
   7514 		wm_init_tx_queue(sc, wmq, txq);
   7515 		mutex_exit(txq->txq_lock);
   7516 
   7517 		mutex_enter(rxq->rxq_lock);
   7518 		error = wm_init_rx_queue(sc, wmq, rxq);
   7519 		mutex_exit(rxq->rxq_lock);
   7520 		if (error)
   7521 			break;
   7522 	}
   7523 
   7524 	return error;
   7525 }
   7526 
   7527 /*
   7528  * wm_tx_offload:
   7529  *
   7530  *	Set up TCP/IP checksumming parameters for the
   7531  *	specified packet.
   7532  */
   7533 static void
   7534 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7535     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7536 {
   7537 	struct mbuf *m0 = txs->txs_mbuf;
   7538 	struct livengood_tcpip_ctxdesc *t;
   7539 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7540 	uint32_t ipcse;
   7541 	struct ether_header *eh;
   7542 	int offset, iphl;
   7543 	uint8_t fields;
   7544 
   7545 	/*
   7546 	 * XXX It would be nice if the mbuf pkthdr had offset
   7547 	 * fields for the protocol headers.
   7548 	 */
   7549 
   7550 	eh = mtod(m0, struct ether_header *);
   7551 	switch (htons(eh->ether_type)) {
   7552 	case ETHERTYPE_IP:
   7553 	case ETHERTYPE_IPV6:
   7554 		offset = ETHER_HDR_LEN;
   7555 		break;
   7556 
   7557 	case ETHERTYPE_VLAN:
   7558 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7559 		break;
   7560 
   7561 	default:
   7562 		/* Don't support this protocol or encapsulation. */
   7563  		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7564  		txq->txq_last_hw_ipcs = 0;
   7565  		txq->txq_last_hw_tucs = 0;
   7566 		*fieldsp = 0;
   7567 		*cmdp = 0;
   7568 		return;
   7569 	}
   7570 
   7571 	if ((m0->m_pkthdr.csum_flags &
   7572 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7573 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7574 	} else
   7575 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7576 
   7577 	ipcse = offset + iphl - 1;
   7578 
   7579 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7580 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7581 	seg = 0;
   7582 	fields = 0;
   7583 
   7584 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7585 		int hlen = offset + iphl;
   7586 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7587 
   7588 		if (__predict_false(m0->m_len <
   7589 				    (hlen + sizeof(struct tcphdr)))) {
   7590 			/*
   7591 			 * TCP/IP headers are not in the first mbuf; we need
   7592 			 * to do this the slow and painful way. Let's just
   7593 			 * hope this doesn't happen very often.
   7594 			 */
   7595 			struct tcphdr th;
   7596 
   7597 			WM_Q_EVCNT_INCR(txq, tsopain);
   7598 
   7599 			m_copydata(m0, hlen, sizeof(th), &th);
   7600 			if (v4) {
   7601 				struct ip ip;
   7602 
   7603 				m_copydata(m0, offset, sizeof(ip), &ip);
   7604 				ip.ip_len = 0;
   7605 				m_copyback(m0,
   7606 				    offset + offsetof(struct ip, ip_len),
   7607 				    sizeof(ip.ip_len), &ip.ip_len);
   7608 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7609 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7610 			} else {
   7611 				struct ip6_hdr ip6;
   7612 
   7613 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7614 				ip6.ip6_plen = 0;
   7615 				m_copyback(m0,
   7616 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7617 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7618 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7619 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7620 			}
   7621 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7622 			    sizeof(th.th_sum), &th.th_sum);
   7623 
   7624 			hlen += th.th_off << 2;
   7625 		} else {
   7626 			/*
   7627 			 * TCP/IP headers are in the first mbuf; we can do
   7628 			 * this the easy way.
   7629 			 */
   7630 			struct tcphdr *th;
   7631 
   7632 			if (v4) {
   7633 				struct ip *ip =
   7634 				    (void *)(mtod(m0, char *) + offset);
   7635 				th = (void *)(mtod(m0, char *) + hlen);
   7636 
   7637 				ip->ip_len = 0;
   7638 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7639 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7640 			} else {
   7641 				struct ip6_hdr *ip6 =
   7642 				    (void *)(mtod(m0, char *) + offset);
   7643 				th = (void *)(mtod(m0, char *) + hlen);
   7644 
   7645 				ip6->ip6_plen = 0;
   7646 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7647 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7648 			}
   7649 			hlen += th->th_off << 2;
   7650 		}
   7651 
   7652 		if (v4) {
   7653 			WM_Q_EVCNT_INCR(txq, tso);
   7654 			cmdlen |= WTX_TCPIP_CMD_IP;
   7655 		} else {
   7656 			WM_Q_EVCNT_INCR(txq, tso6);
   7657 			ipcse = 0;
   7658 		}
   7659 		cmd |= WTX_TCPIP_CMD_TSE;
   7660 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7661 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7662 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7663 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7664 	}
   7665 
   7666 	/*
   7667 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7668 	 * offload feature, if we load the context descriptor, we
   7669 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7670 	 */
   7671 
   7672 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7673 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7674 	    WTX_TCPIP_IPCSE(ipcse);
   7675 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7676 		WM_Q_EVCNT_INCR(txq, ipsum);
   7677 		fields |= WTX_IXSM;
   7678 	}
   7679 
   7680 	offset += iphl;
   7681 
   7682 	if (m0->m_pkthdr.csum_flags &
   7683 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7684 		WM_Q_EVCNT_INCR(txq, tusum);
   7685 		fields |= WTX_TXSM;
   7686 		tucs = WTX_TCPIP_TUCSS(offset) |
   7687 		    WTX_TCPIP_TUCSO(offset +
   7688 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7689 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7690 	} else if ((m0->m_pkthdr.csum_flags &
   7691 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7692 		WM_Q_EVCNT_INCR(txq, tusum6);
   7693 		fields |= WTX_TXSM;
   7694 		tucs = WTX_TCPIP_TUCSS(offset) |
   7695 		    WTX_TCPIP_TUCSO(offset +
   7696 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7697 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7698 	} else {
   7699 		/* Just initialize it to a valid TCP context. */
   7700 		tucs = WTX_TCPIP_TUCSS(offset) |
   7701 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7702 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7703 	}
   7704 
   7705 	*cmdp = cmd;
   7706 	*fieldsp = fields;
   7707 
   7708 	/*
   7709 	 * We don't have to write context descriptor for every packet
   7710 	 * except for 82574. For 82574, we must write context descriptor
   7711 	 * for every packet when we use two descriptor queues.
   7712 	 *
   7713 	 * The 82574L can only remember the *last* context used
   7714 	 * regardless of queue that it was use for.  We cannot reuse
   7715 	 * contexts on this hardware platform and must generate a new
   7716 	 * context every time.  82574L hardware spec, section 7.2.6,
   7717 	 * second note.
   7718 	 */
   7719 	if (sc->sc_nqueues < 2) {
   7720 		/*
   7721 	 	 *
   7722 	  	 * Setting up new checksum offload context for every
   7723 		 * frames takes a lot of processing time for hardware.
   7724 		 * This also reduces performance a lot for small sized
   7725 		 * frames so avoid it if driver can use previously
   7726 		 * configured checksum offload context.
   7727 		 * For TSO, in theory we can use the same TSO context only if
   7728 		 * frame is the same type(IP/TCP) and the same MSS. However
   7729 		 * checking whether a frame has the same IP/TCP structure is
   7730 		 * hard thing so just ignore that and always restablish a
   7731 		 * new TSO context.
   7732 	  	 */
   7733 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7734 		    == 0) {
   7735 			if (txq->txq_last_hw_cmd == cmd &&
   7736 			    txq->txq_last_hw_fields == fields &&
   7737 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7738 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7739 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7740 				return;
   7741 			}
   7742 		}
   7743 
   7744 	 	txq->txq_last_hw_cmd = cmd;
   7745  		txq->txq_last_hw_fields = fields;
   7746  		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7747 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7748 	}
   7749 
   7750 	/* Fill in the context descriptor. */
   7751 	t = (struct livengood_tcpip_ctxdesc *)
   7752 	    &txq->txq_descs[txq->txq_next];
   7753 	t->tcpip_ipcs = htole32(ipcs);
   7754 	t->tcpip_tucs = htole32(tucs);
   7755 	t->tcpip_cmdlen = htole32(cmdlen);
   7756 	t->tcpip_seg = htole32(seg);
   7757 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7758 
   7759 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7760 	txs->txs_ndesc++;
   7761 }
   7762 
   7763 static inline int
   7764 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7765 {
   7766 	struct wm_softc *sc = ifp->if_softc;
   7767 	u_int cpuid = cpu_index(curcpu());
   7768 
   7769 	/*
   7770 	 * Currently, simple distribute strategy.
   7771 	 * TODO:
   7772 	 * distribute by flowid(RSS has value).
   7773 	 */
   7774 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7775 }
   7776 
   7777 static inline bool
   7778 wm_linkdown_discard(struct wm_txqueue *txq)
   7779 {
   7780 
   7781 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7782 		return true;
   7783 
   7784 	return false;
   7785 }
   7786 
   7787 /*
   7788  * wm_start:		[ifnet interface function]
   7789  *
   7790  *	Start packet transmission on the interface.
   7791  */
   7792 static void
   7793 wm_start(struct ifnet *ifp)
   7794 {
   7795 	struct wm_softc *sc = ifp->if_softc;
   7796 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7797 
   7798 #ifdef WM_MPSAFE
   7799 	KASSERT(if_is_mpsafe(ifp));
   7800 #endif
   7801 	/*
   7802 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7803 	 */
   7804 
   7805 	mutex_enter(txq->txq_lock);
   7806 	if (!txq->txq_stopping)
   7807 		wm_start_locked(ifp);
   7808 	mutex_exit(txq->txq_lock);
   7809 }
   7810 
   7811 static void
   7812 wm_start_locked(struct ifnet *ifp)
   7813 {
   7814 	struct wm_softc *sc = ifp->if_softc;
   7815 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7816 
   7817 	wm_send_common_locked(ifp, txq, false);
   7818 }
   7819 
   7820 static int
   7821 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7822 {
   7823 	int qid;
   7824 	struct wm_softc *sc = ifp->if_softc;
   7825 	struct wm_txqueue *txq;
   7826 
   7827 	qid = wm_select_txqueue(ifp, m);
   7828 	txq = &sc->sc_queue[qid].wmq_txq;
   7829 
   7830 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7831 		m_freem(m);
   7832 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7833 		return ENOBUFS;
   7834 	}
   7835 
   7836 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7837 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7838 	if (m->m_flags & M_MCAST)
   7839 		if_statinc_ref(nsr, if_omcasts);
   7840 	IF_STAT_PUTREF(ifp);
   7841 
   7842 	if (mutex_tryenter(txq->txq_lock)) {
   7843 		if (!txq->txq_stopping)
   7844 			wm_transmit_locked(ifp, txq);
   7845 		mutex_exit(txq->txq_lock);
   7846 	}
   7847 
   7848 	return 0;
   7849 }
   7850 
   7851 static void
   7852 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7853 {
   7854 
   7855 	wm_send_common_locked(ifp, txq, true);
   7856 }
   7857 
   7858 static void
   7859 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7860     bool is_transmit)
   7861 {
   7862 	struct wm_softc *sc = ifp->if_softc;
   7863 	struct mbuf *m0;
   7864 	struct wm_txsoft *txs;
   7865 	bus_dmamap_t dmamap;
   7866 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7867 	bus_addr_t curaddr;
   7868 	bus_size_t seglen, curlen;
   7869 	uint32_t cksumcmd;
   7870 	uint8_t cksumfields;
   7871 	bool remap = true;
   7872 
   7873 	KASSERT(mutex_owned(txq->txq_lock));
   7874 
   7875 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7876 		return;
   7877 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7878 		return;
   7879 
   7880 	if (__predict_false(wm_linkdown_discard(txq))) {
   7881 		do {
   7882 			if (is_transmit)
   7883 				m0 = pcq_get(txq->txq_interq);
   7884 			else
   7885 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   7886 			/*
   7887 			 * increment successed packet counter as in the case
   7888 			 * which the packet is discarded by link down PHY.
   7889 			 */
   7890 			if (m0 != NULL)
   7891 				if_statinc(ifp, if_opackets);
   7892 			m_freem(m0);
   7893 		} while (m0 != NULL);
   7894 		return;
   7895 	}
   7896 
   7897 	/* Remember the previous number of free descriptors. */
   7898 	ofree = txq->txq_free;
   7899 
   7900 	/*
   7901 	 * Loop through the send queue, setting up transmit descriptors
   7902 	 * until we drain the queue, or use up all available transmit
   7903 	 * descriptors.
   7904 	 */
   7905 	for (;;) {
   7906 		m0 = NULL;
   7907 
   7908 		/* Get a work queue entry. */
   7909 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7910 			wm_txeof(txq, UINT_MAX);
   7911 			if (txq->txq_sfree == 0) {
   7912 				DPRINTF(sc, WM_DEBUG_TX,
   7913 				    ("%s: TX: no free job descriptors\n",
   7914 					device_xname(sc->sc_dev)));
   7915 				WM_Q_EVCNT_INCR(txq, txsstall);
   7916 				break;
   7917 			}
   7918 		}
   7919 
   7920 		/* Grab a packet off the queue. */
   7921 		if (is_transmit)
   7922 			m0 = pcq_get(txq->txq_interq);
   7923 		else
   7924 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7925 		if (m0 == NULL)
   7926 			break;
   7927 
   7928 		DPRINTF(sc, WM_DEBUG_TX,
   7929 		    ("%s: TX: have packet to transmit: %p\n",
   7930 			device_xname(sc->sc_dev), m0));
   7931 
   7932 		txs = &txq->txq_soft[txq->txq_snext];
   7933 		dmamap = txs->txs_dmamap;
   7934 
   7935 		use_tso = (m0->m_pkthdr.csum_flags &
   7936 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7937 
   7938 		/*
   7939 		 * So says the Linux driver:
   7940 		 * The controller does a simple calculation to make sure
   7941 		 * there is enough room in the FIFO before initiating the
   7942 		 * DMA for each buffer. The calc is:
   7943 		 *	4 = ceil(buffer len / MSS)
   7944 		 * To make sure we don't overrun the FIFO, adjust the max
   7945 		 * buffer len if the MSS drops.
   7946 		 */
   7947 		dmamap->dm_maxsegsz =
   7948 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7949 		    ? m0->m_pkthdr.segsz << 2
   7950 		    : WTX_MAX_LEN;
   7951 
   7952 		/*
   7953 		 * Load the DMA map.  If this fails, the packet either
   7954 		 * didn't fit in the allotted number of segments, or we
   7955 		 * were short on resources.  For the too-many-segments
   7956 		 * case, we simply report an error and drop the packet,
   7957 		 * since we can't sanely copy a jumbo packet to a single
   7958 		 * buffer.
   7959 		 */
   7960 retry:
   7961 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7962 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7963 		if (__predict_false(error)) {
   7964 			if (error == EFBIG) {
   7965 				if (remap == true) {
   7966 					struct mbuf *m;
   7967 
   7968 					remap = false;
   7969 					m = m_defrag(m0, M_NOWAIT);
   7970 					if (m != NULL) {
   7971 						WM_Q_EVCNT_INCR(txq, defrag);
   7972 						m0 = m;
   7973 						goto retry;
   7974 					}
   7975 				}
   7976 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7977 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7978 				    "DMA segments, dropping...\n",
   7979 				    device_xname(sc->sc_dev));
   7980 				wm_dump_mbuf_chain(sc, m0);
   7981 				m_freem(m0);
   7982 				continue;
   7983 			}
   7984 			/* Short on resources, just stop for now. */
   7985 			DPRINTF(sc, WM_DEBUG_TX,
   7986 			    ("%s: TX: dmamap load failed: %d\n",
   7987 				device_xname(sc->sc_dev), error));
   7988 			break;
   7989 		}
   7990 
   7991 		segs_needed = dmamap->dm_nsegs;
   7992 		if (use_tso) {
   7993 			/* For sentinel descriptor; see below. */
   7994 			segs_needed++;
   7995 		}
   7996 
   7997 		/*
   7998 		 * Ensure we have enough descriptors free to describe
   7999 		 * the packet. Note, we always reserve one descriptor
   8000 		 * at the end of the ring due to the semantics of the
   8001 		 * TDT register, plus one more in the event we need
   8002 		 * to load offload context.
   8003 		 */
   8004 		if (segs_needed > txq->txq_free - 2) {
   8005 			/*
   8006 			 * Not enough free descriptors to transmit this
   8007 			 * packet.  We haven't committed anything yet,
   8008 			 * so just unload the DMA map, put the packet
   8009 			 * pack on the queue, and punt. Notify the upper
   8010 			 * layer that there are no more slots left.
   8011 			 */
   8012 			DPRINTF(sc, WM_DEBUG_TX,
   8013 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8014 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8015 				segs_needed, txq->txq_free - 1));
   8016 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8017 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8018 			WM_Q_EVCNT_INCR(txq, txdstall);
   8019 			break;
   8020 		}
   8021 
   8022 		/*
   8023 		 * Check for 82547 Tx FIFO bug. We need to do this
   8024 		 * once we know we can transmit the packet, since we
   8025 		 * do some internal FIFO space accounting here.
   8026 		 */
   8027 		if (sc->sc_type == WM_T_82547 &&
   8028 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8029 			DPRINTF(sc, WM_DEBUG_TX,
   8030 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8031 				device_xname(sc->sc_dev)));
   8032 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8033 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8034 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8035 			break;
   8036 		}
   8037 
   8038 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8039 
   8040 		DPRINTF(sc, WM_DEBUG_TX,
   8041 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8042 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8043 
   8044 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8045 
   8046 		/*
   8047 		 * Store a pointer to the packet so that we can free it
   8048 		 * later.
   8049 		 *
   8050 		 * Initially, we consider the number of descriptors the
   8051 		 * packet uses the number of DMA segments.  This may be
   8052 		 * incremented by 1 if we do checksum offload (a descriptor
   8053 		 * is used to set the checksum context).
   8054 		 */
   8055 		txs->txs_mbuf = m0;
   8056 		txs->txs_firstdesc = txq->txq_next;
   8057 		txs->txs_ndesc = segs_needed;
   8058 
   8059 		/* Set up offload parameters for this packet. */
   8060 		if (m0->m_pkthdr.csum_flags &
   8061 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8062 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8063 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8064 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8065 		} else {
   8066  			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8067  			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8068 			cksumcmd = 0;
   8069 			cksumfields = 0;
   8070 		}
   8071 
   8072 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8073 
   8074 		/* Sync the DMA map. */
   8075 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8076 		    BUS_DMASYNC_PREWRITE);
   8077 
   8078 		/* Initialize the transmit descriptor. */
   8079 		for (nexttx = txq->txq_next, seg = 0;
   8080 		     seg < dmamap->dm_nsegs; seg++) {
   8081 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8082 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8083 			     seglen != 0;
   8084 			     curaddr += curlen, seglen -= curlen,
   8085 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8086 				curlen = seglen;
   8087 
   8088 				/*
   8089 				 * So says the Linux driver:
   8090 				 * Work around for premature descriptor
   8091 				 * write-backs in TSO mode.  Append a
   8092 				 * 4-byte sentinel descriptor.
   8093 				 */
   8094 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8095 				    curlen > 8)
   8096 					curlen -= 4;
   8097 
   8098 				wm_set_dma_addr(
   8099 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8100 				txq->txq_descs[nexttx].wtx_cmdlen
   8101 				    = htole32(cksumcmd | curlen);
   8102 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8103 				    = 0;
   8104 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8105 				    = cksumfields;
   8106 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8107 				lasttx = nexttx;
   8108 
   8109 				DPRINTF(sc, WM_DEBUG_TX,
   8110 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8111 					"len %#04zx\n",
   8112 					device_xname(sc->sc_dev), nexttx,
   8113 					(uint64_t)curaddr, curlen));
   8114 			}
   8115 		}
   8116 
   8117 		KASSERT(lasttx != -1);
   8118 
   8119 		/*
   8120 		 * Set up the command byte on the last descriptor of
   8121 		 * the packet. If we're in the interrupt delay window,
   8122 		 * delay the interrupt.
   8123 		 */
   8124 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8125 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8126 
   8127 		/*
   8128 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8129 		 * up the descriptor to encapsulate the packet for us.
   8130 		 *
   8131 		 * This is only valid on the last descriptor of the packet.
   8132 		 */
   8133 		if (vlan_has_tag(m0)) {
   8134 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8135 			    htole32(WTX_CMD_VLE);
   8136 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8137 			    = htole16(vlan_get_tag(m0));
   8138 		}
   8139 
   8140 		txs->txs_lastdesc = lasttx;
   8141 
   8142 		DPRINTF(sc, WM_DEBUG_TX,
   8143 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8144 			device_xname(sc->sc_dev),
   8145 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8146 
   8147 		/* Sync the descriptors we're using. */
   8148 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8149 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8150 
   8151 		/* Give the packet to the chip. */
   8152 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8153 
   8154 		DPRINTF(sc, WM_DEBUG_TX,
   8155 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8156 
   8157 		DPRINTF(sc, WM_DEBUG_TX,
   8158 		    ("%s: TX: finished transmitting packet, job %d\n",
   8159 			device_xname(sc->sc_dev), txq->txq_snext));
   8160 
   8161 		/* Advance the tx pointer. */
   8162 		txq->txq_free -= txs->txs_ndesc;
   8163 		txq->txq_next = nexttx;
   8164 
   8165 		txq->txq_sfree--;
   8166 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8167 
   8168 		/* Pass the packet to any BPF listeners. */
   8169 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8170 	}
   8171 
   8172 	if (m0 != NULL) {
   8173 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8174 		WM_Q_EVCNT_INCR(txq, descdrop);
   8175 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8176 			__func__));
   8177 		m_freem(m0);
   8178 	}
   8179 
   8180 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8181 		/* No more slots; notify upper layer. */
   8182 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8183 	}
   8184 
   8185 	if (txq->txq_free != ofree) {
   8186 		/* Set a watchdog timer in case the chip flakes out. */
   8187 		txq->txq_lastsent = time_uptime;
   8188 		txq->txq_sending = true;
   8189 	}
   8190 }
   8191 
   8192 /*
   8193  * wm_nq_tx_offload:
   8194  *
   8195  *	Set up TCP/IP checksumming parameters for the
   8196  *	specified packet, for NEWQUEUE devices
   8197  */
   8198 static void
   8199 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8200     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8201 {
   8202 	struct mbuf *m0 = txs->txs_mbuf;
   8203 	uint32_t vl_len, mssidx, cmdc;
   8204 	struct ether_header *eh;
   8205 	int offset, iphl;
   8206 
   8207 	/*
   8208 	 * XXX It would be nice if the mbuf pkthdr had offset
   8209 	 * fields for the protocol headers.
   8210 	 */
   8211 	*cmdlenp = 0;
   8212 	*fieldsp = 0;
   8213 
   8214 	eh = mtod(m0, struct ether_header *);
   8215 	switch (htons(eh->ether_type)) {
   8216 	case ETHERTYPE_IP:
   8217 	case ETHERTYPE_IPV6:
   8218 		offset = ETHER_HDR_LEN;
   8219 		break;
   8220 
   8221 	case ETHERTYPE_VLAN:
   8222 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8223 		break;
   8224 
   8225 	default:
   8226 		/* Don't support this protocol or encapsulation. */
   8227 		*do_csum = false;
   8228 		return;
   8229 	}
   8230 	*do_csum = true;
   8231 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8232 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8233 
   8234 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8235 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8236 
   8237 	if ((m0->m_pkthdr.csum_flags &
   8238 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8239 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8240 	} else {
   8241 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8242 	}
   8243 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8244 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8245 
   8246 	if (vlan_has_tag(m0)) {
   8247 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8248 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8249 		*cmdlenp |= NQTX_CMD_VLE;
   8250 	}
   8251 
   8252 	mssidx = 0;
   8253 
   8254 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8255 		int hlen = offset + iphl;
   8256 		int tcp_hlen;
   8257 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8258 
   8259 		if (__predict_false(m0->m_len <
   8260 				    (hlen + sizeof(struct tcphdr)))) {
   8261 			/*
   8262 			 * TCP/IP headers are not in the first mbuf; we need
   8263 			 * to do this the slow and painful way. Let's just
   8264 			 * hope this doesn't happen very often.
   8265 			 */
   8266 			struct tcphdr th;
   8267 
   8268 			WM_Q_EVCNT_INCR(txq, tsopain);
   8269 
   8270 			m_copydata(m0, hlen, sizeof(th), &th);
   8271 			if (v4) {
   8272 				struct ip ip;
   8273 
   8274 				m_copydata(m0, offset, sizeof(ip), &ip);
   8275 				ip.ip_len = 0;
   8276 				m_copyback(m0,
   8277 				    offset + offsetof(struct ip, ip_len),
   8278 				    sizeof(ip.ip_len), &ip.ip_len);
   8279 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8280 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8281 			} else {
   8282 				struct ip6_hdr ip6;
   8283 
   8284 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8285 				ip6.ip6_plen = 0;
   8286 				m_copyback(m0,
   8287 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8288 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8289 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8290 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8291 			}
   8292 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8293 			    sizeof(th.th_sum), &th.th_sum);
   8294 
   8295 			tcp_hlen = th.th_off << 2;
   8296 		} else {
   8297 			/*
   8298 			 * TCP/IP headers are in the first mbuf; we can do
   8299 			 * this the easy way.
   8300 			 */
   8301 			struct tcphdr *th;
   8302 
   8303 			if (v4) {
   8304 				struct ip *ip =
   8305 				    (void *)(mtod(m0, char *) + offset);
   8306 				th = (void *)(mtod(m0, char *) + hlen);
   8307 
   8308 				ip->ip_len = 0;
   8309 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8310 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8311 			} else {
   8312 				struct ip6_hdr *ip6 =
   8313 				    (void *)(mtod(m0, char *) + offset);
   8314 				th = (void *)(mtod(m0, char *) + hlen);
   8315 
   8316 				ip6->ip6_plen = 0;
   8317 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8318 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8319 			}
   8320 			tcp_hlen = th->th_off << 2;
   8321 		}
   8322 		hlen += tcp_hlen;
   8323 		*cmdlenp |= NQTX_CMD_TSE;
   8324 
   8325 		if (v4) {
   8326 			WM_Q_EVCNT_INCR(txq, tso);
   8327 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8328 		} else {
   8329 			WM_Q_EVCNT_INCR(txq, tso6);
   8330 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8331 		}
   8332 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8333 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8334 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8335 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8336 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8337 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8338 	} else {
   8339 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8340 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8341 	}
   8342 
   8343 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8344 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8345 		cmdc |= NQTXC_CMD_IP4;
   8346 	}
   8347 
   8348 	if (m0->m_pkthdr.csum_flags &
   8349 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8350 		WM_Q_EVCNT_INCR(txq, tusum);
   8351 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8352 			cmdc |= NQTXC_CMD_TCP;
   8353 		else
   8354 			cmdc |= NQTXC_CMD_UDP;
   8355 
   8356 		cmdc |= NQTXC_CMD_IP4;
   8357 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8358 	}
   8359 	if (m0->m_pkthdr.csum_flags &
   8360 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8361 		WM_Q_EVCNT_INCR(txq, tusum6);
   8362 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8363 			cmdc |= NQTXC_CMD_TCP;
   8364 		else
   8365 			cmdc |= NQTXC_CMD_UDP;
   8366 
   8367 		cmdc |= NQTXC_CMD_IP6;
   8368 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8369 	}
   8370 
   8371 	/*
   8372 	 * We don't have to write context descriptor for every packet to
   8373 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
   8374 	 * I210 and I211. It is enough to write once per a Tx queue for these
   8375 	 * controllers.
   8376 	 * It would be overhead to write context descriptor for every packet,
   8377 	 * however it does not cause problems.
   8378 	 */
   8379 	/* Fill in the context descriptor. */
   8380 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8381 	    htole32(vl_len);
   8382 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8383 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8384 	    htole32(cmdc);
   8385 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8386 	    htole32(mssidx);
   8387 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8388 	DPRINTF(sc, WM_DEBUG_TX,
   8389 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8390 		txq->txq_next, 0, vl_len));
   8391 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8392 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8393 	txs->txs_ndesc++;
   8394 }
   8395 
   8396 /*
   8397  * wm_nq_start:		[ifnet interface function]
   8398  *
   8399  *	Start packet transmission on the interface for NEWQUEUE devices
   8400  */
   8401 static void
   8402 wm_nq_start(struct ifnet *ifp)
   8403 {
   8404 	struct wm_softc *sc = ifp->if_softc;
   8405 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8406 
   8407 #ifdef WM_MPSAFE
   8408 	KASSERT(if_is_mpsafe(ifp));
   8409 #endif
   8410 	/*
   8411 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8412 	 */
   8413 
   8414 	mutex_enter(txq->txq_lock);
   8415 	if (!txq->txq_stopping)
   8416 		wm_nq_start_locked(ifp);
   8417 	mutex_exit(txq->txq_lock);
   8418 }
   8419 
   8420 static void
   8421 wm_nq_start_locked(struct ifnet *ifp)
   8422 {
   8423 	struct wm_softc *sc = ifp->if_softc;
   8424 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8425 
   8426 	wm_nq_send_common_locked(ifp, txq, false);
   8427 }
   8428 
   8429 static int
   8430 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8431 {
   8432 	int qid;
   8433 	struct wm_softc *sc = ifp->if_softc;
   8434 	struct wm_txqueue *txq;
   8435 
   8436 	qid = wm_select_txqueue(ifp, m);
   8437 	txq = &sc->sc_queue[qid].wmq_txq;
   8438 
   8439 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8440 		m_freem(m);
   8441 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8442 		return ENOBUFS;
   8443 	}
   8444 
   8445 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8446 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8447 	if (m->m_flags & M_MCAST)
   8448 		if_statinc_ref(nsr, if_omcasts);
   8449 	IF_STAT_PUTREF(ifp);
   8450 
   8451 	/*
   8452 	 * The situations which this mutex_tryenter() fails at running time
   8453 	 * are below two patterns.
   8454 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
   8455 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
   8456 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   8457 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   8458 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
   8459 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
   8460 	 * stuck, either.
   8461 	 */
   8462 	if (mutex_tryenter(txq->txq_lock)) {
   8463 		if (!txq->txq_stopping)
   8464 			wm_nq_transmit_locked(ifp, txq);
   8465 		mutex_exit(txq->txq_lock);
   8466 	}
   8467 
   8468 	return 0;
   8469 }
   8470 
   8471 static void
   8472 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8473 {
   8474 
   8475 	wm_nq_send_common_locked(ifp, txq, true);
   8476 }
   8477 
   8478 static void
   8479 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8480     bool is_transmit)
   8481 {
   8482 	struct wm_softc *sc = ifp->if_softc;
   8483 	struct mbuf *m0;
   8484 	struct wm_txsoft *txs;
   8485 	bus_dmamap_t dmamap;
   8486 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8487 	bool do_csum, sent;
   8488 	bool remap = true;
   8489 
   8490 	KASSERT(mutex_owned(txq->txq_lock));
   8491 
   8492 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8493 		return;
   8494 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8495 		return;
   8496 
   8497 	if (__predict_false(wm_linkdown_discard(txq))) {
   8498 		do {
   8499 			if (is_transmit)
   8500 				m0 = pcq_get(txq->txq_interq);
   8501 			else
   8502 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8503 			/*
   8504 			 * increment successed packet counter as in the case
   8505 			 * which the packet is discarded by link down PHY.
   8506 			 */
   8507 			if (m0 != NULL)
   8508 				if_statinc(ifp, if_opackets);
   8509 			m_freem(m0);
   8510 		} while (m0 != NULL);
   8511 		return;
   8512 	}
   8513 
   8514 	sent = false;
   8515 
   8516 	/*
   8517 	 * Loop through the send queue, setting up transmit descriptors
   8518 	 * until we drain the queue, or use up all available transmit
   8519 	 * descriptors.
   8520 	 */
   8521 	for (;;) {
   8522 		m0 = NULL;
   8523 
   8524 		/* Get a work queue entry. */
   8525 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8526 			wm_txeof(txq, UINT_MAX);
   8527 			if (txq->txq_sfree == 0) {
   8528 				DPRINTF(sc, WM_DEBUG_TX,
   8529 				    ("%s: TX: no free job descriptors\n",
   8530 					device_xname(sc->sc_dev)));
   8531 				WM_Q_EVCNT_INCR(txq, txsstall);
   8532 				break;
   8533 			}
   8534 		}
   8535 
   8536 		/* Grab a packet off the queue. */
   8537 		if (is_transmit)
   8538 			m0 = pcq_get(txq->txq_interq);
   8539 		else
   8540 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8541 		if (m0 == NULL)
   8542 			break;
   8543 
   8544 		DPRINTF(sc, WM_DEBUG_TX,
   8545 		    ("%s: TX: have packet to transmit: %p\n",
   8546 		    device_xname(sc->sc_dev), m0));
   8547 
   8548 		txs = &txq->txq_soft[txq->txq_snext];
   8549 		dmamap = txs->txs_dmamap;
   8550 
   8551 		/*
   8552 		 * Load the DMA map.  If this fails, the packet either
   8553 		 * didn't fit in the allotted number of segments, or we
   8554 		 * were short on resources.  For the too-many-segments
   8555 		 * case, we simply report an error and drop the packet,
   8556 		 * since we can't sanely copy a jumbo packet to a single
   8557 		 * buffer.
   8558 		 */
   8559 retry:
   8560 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8561 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8562 		if (__predict_false(error)) {
   8563 			if (error == EFBIG) {
   8564 				if (remap == true) {
   8565 					struct mbuf *m;
   8566 
   8567 					remap = false;
   8568 					m = m_defrag(m0, M_NOWAIT);
   8569 					if (m != NULL) {
   8570 						WM_Q_EVCNT_INCR(txq, defrag);
   8571 						m0 = m;
   8572 						goto retry;
   8573 					}
   8574 				}
   8575 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8576 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8577 				    "DMA segments, dropping...\n",
   8578 				    device_xname(sc->sc_dev));
   8579 				wm_dump_mbuf_chain(sc, m0);
   8580 				m_freem(m0);
   8581 				continue;
   8582 			}
   8583 			/* Short on resources, just stop for now. */
   8584 			DPRINTF(sc, WM_DEBUG_TX,
   8585 			    ("%s: TX: dmamap load failed: %d\n",
   8586 				device_xname(sc->sc_dev), error));
   8587 			break;
   8588 		}
   8589 
   8590 		segs_needed = dmamap->dm_nsegs;
   8591 
   8592 		/*
   8593 		 * Ensure we have enough descriptors free to describe
   8594 		 * the packet. Note, we always reserve one descriptor
   8595 		 * at the end of the ring due to the semantics of the
   8596 		 * TDT register, plus one more in the event we need
   8597 		 * to load offload context.
   8598 		 */
   8599 		if (segs_needed > txq->txq_free - 2) {
   8600 			/*
   8601 			 * Not enough free descriptors to transmit this
   8602 			 * packet.  We haven't committed anything yet,
   8603 			 * so just unload the DMA map, put the packet
   8604 			 * pack on the queue, and punt. Notify the upper
   8605 			 * layer that there are no more slots left.
   8606 			 */
   8607 			DPRINTF(sc, WM_DEBUG_TX,
   8608 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8609 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8610 				segs_needed, txq->txq_free - 1));
   8611 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8612 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8613 			WM_Q_EVCNT_INCR(txq, txdstall);
   8614 			break;
   8615 		}
   8616 
   8617 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8618 
   8619 		DPRINTF(sc, WM_DEBUG_TX,
   8620 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8621 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8622 
   8623 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8624 
   8625 		/*
   8626 		 * Store a pointer to the packet so that we can free it
   8627 		 * later.
   8628 		 *
   8629 		 * Initially, we consider the number of descriptors the
   8630 		 * packet uses the number of DMA segments.  This may be
   8631 		 * incremented by 1 if we do checksum offload (a descriptor
   8632 		 * is used to set the checksum context).
   8633 		 */
   8634 		txs->txs_mbuf = m0;
   8635 		txs->txs_firstdesc = txq->txq_next;
   8636 		txs->txs_ndesc = segs_needed;
   8637 
   8638 		/* Set up offload parameters for this packet. */
   8639 		uint32_t cmdlen, fields, dcmdlen;
   8640 		if (m0->m_pkthdr.csum_flags &
   8641 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8642 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8643 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8644 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8645 			    &do_csum);
   8646 		} else {
   8647 			do_csum = false;
   8648 			cmdlen = 0;
   8649 			fields = 0;
   8650 		}
   8651 
   8652 		/* Sync the DMA map. */
   8653 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8654 		    BUS_DMASYNC_PREWRITE);
   8655 
   8656 		/* Initialize the first transmit descriptor. */
   8657 		nexttx = txq->txq_next;
   8658 		if (!do_csum) {
   8659 			/* Setup a legacy descriptor */
   8660 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8661 			    dmamap->dm_segs[0].ds_addr);
   8662 			txq->txq_descs[nexttx].wtx_cmdlen =
   8663 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8664 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8665 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8666 			if (vlan_has_tag(m0)) {
   8667 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8668 				    htole32(WTX_CMD_VLE);
   8669 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8670 				    htole16(vlan_get_tag(m0));
   8671 			} else
   8672 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8673 
   8674 			dcmdlen = 0;
   8675 		} else {
   8676 			/* Setup an advanced data descriptor */
   8677 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8678 			    htole64(dmamap->dm_segs[0].ds_addr);
   8679 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8680 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8681 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8682 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8683 			    htole32(fields);
   8684 			DPRINTF(sc, WM_DEBUG_TX,
   8685 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8686 				device_xname(sc->sc_dev), nexttx,
   8687 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8688 			DPRINTF(sc, WM_DEBUG_TX,
   8689 			    ("\t 0x%08x%08x\n", fields,
   8690 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8691 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8692 		}
   8693 
   8694 		lasttx = nexttx;
   8695 		nexttx = WM_NEXTTX(txq, nexttx);
   8696 		/*
   8697 		 * Fill in the next descriptors. legacy or advanced format
   8698 		 * is the same here
   8699 		 */
   8700 		for (seg = 1; seg < dmamap->dm_nsegs;
   8701 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8702 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8703 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8704 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8705 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8706 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8707 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8708 			lasttx = nexttx;
   8709 
   8710 			DPRINTF(sc, WM_DEBUG_TX,
   8711 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8712 				device_xname(sc->sc_dev), nexttx,
   8713 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8714 				dmamap->dm_segs[seg].ds_len));
   8715 		}
   8716 
   8717 		KASSERT(lasttx != -1);
   8718 
   8719 		/*
   8720 		 * Set up the command byte on the last descriptor of
   8721 		 * the packet. If we're in the interrupt delay window,
   8722 		 * delay the interrupt.
   8723 		 */
   8724 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8725 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8726 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8727 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8728 
   8729 		txs->txs_lastdesc = lasttx;
   8730 
   8731 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8732 		    device_xname(sc->sc_dev),
   8733 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8734 
   8735 		/* Sync the descriptors we're using. */
   8736 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8737 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8738 
   8739 		/* Give the packet to the chip. */
   8740 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8741 		sent = true;
   8742 
   8743 		DPRINTF(sc, WM_DEBUG_TX,
   8744 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8745 
   8746 		DPRINTF(sc, WM_DEBUG_TX,
   8747 		    ("%s: TX: finished transmitting packet, job %d\n",
   8748 			device_xname(sc->sc_dev), txq->txq_snext));
   8749 
   8750 		/* Advance the tx pointer. */
   8751 		txq->txq_free -= txs->txs_ndesc;
   8752 		txq->txq_next = nexttx;
   8753 
   8754 		txq->txq_sfree--;
   8755 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8756 
   8757 		/* Pass the packet to any BPF listeners. */
   8758 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8759 	}
   8760 
   8761 	if (m0 != NULL) {
   8762 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8763 		WM_Q_EVCNT_INCR(txq, descdrop);
   8764 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8765 			__func__));
   8766 		m_freem(m0);
   8767 	}
   8768 
   8769 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8770 		/* No more slots; notify upper layer. */
   8771 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8772 	}
   8773 
   8774 	if (sent) {
   8775 		/* Set a watchdog timer in case the chip flakes out. */
   8776 		txq->txq_lastsent = time_uptime;
   8777 		txq->txq_sending = true;
   8778 	}
   8779 }
   8780 
   8781 static void
   8782 wm_deferred_start_locked(struct wm_txqueue *txq)
   8783 {
   8784 	struct wm_softc *sc = txq->txq_sc;
   8785 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8786 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8787 	int qid = wmq->wmq_id;
   8788 
   8789 	KASSERT(mutex_owned(txq->txq_lock));
   8790 
   8791 	if (txq->txq_stopping) {
   8792 		mutex_exit(txq->txq_lock);
   8793 		return;
   8794 	}
   8795 
   8796 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8797 		/* XXX need for ALTQ or one CPU system */
   8798 		if (qid == 0)
   8799 			wm_nq_start_locked(ifp);
   8800 		wm_nq_transmit_locked(ifp, txq);
   8801 	} else {
   8802 		/* XXX need for ALTQ or one CPU system */
   8803 		if (qid == 0)
   8804 			wm_start_locked(ifp);
   8805 		wm_transmit_locked(ifp, txq);
   8806 	}
   8807 }
   8808 
   8809 /* Interrupt */
   8810 
   8811 /*
   8812  * wm_txeof:
   8813  *
   8814  *	Helper; handle transmit interrupts.
   8815  */
   8816 static bool
   8817 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8818 {
   8819 	struct wm_softc *sc = txq->txq_sc;
   8820 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8821 	struct wm_txsoft *txs;
   8822 	int count = 0;
   8823 	int i;
   8824 	uint8_t status;
   8825 	bool more = false;
   8826 
   8827 	KASSERT(mutex_owned(txq->txq_lock));
   8828 
   8829 	if (txq->txq_stopping)
   8830 		return false;
   8831 
   8832 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8833 
   8834 	/*
   8835 	 * Go through the Tx list and free mbufs for those
   8836 	 * frames which have been transmitted.
   8837 	 */
   8838 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8839 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8840 		if (limit-- == 0) {
   8841 			more = true;
   8842 			DPRINTF(sc, WM_DEBUG_TX,
   8843 			    ("%s: TX: loop limited, job %d is not processed\n",
   8844 				device_xname(sc->sc_dev), i));
   8845 			break;
   8846 		}
   8847 
   8848 		txs = &txq->txq_soft[i];
   8849 
   8850 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8851 			device_xname(sc->sc_dev), i));
   8852 
   8853 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8854 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8855 
   8856 		status =
   8857 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8858 		if ((status & WTX_ST_DD) == 0) {
   8859 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8860 			    BUS_DMASYNC_PREREAD);
   8861 			break;
   8862 		}
   8863 
   8864 		count++;
   8865 		DPRINTF(sc, WM_DEBUG_TX,
   8866 		    ("%s: TX: job %d done: descs %d..%d\n",
   8867 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8868 		    txs->txs_lastdesc));
   8869 
   8870 		/*
   8871 		 * XXX We should probably be using the statistics
   8872 		 * XXX registers, but I don't know if they exist
   8873 		 * XXX on chips before the i82544.
   8874 		 */
   8875 
   8876 #ifdef WM_EVENT_COUNTERS
   8877 		if (status & WTX_ST_TU)
   8878 			WM_Q_EVCNT_INCR(txq, underrun);
   8879 #endif /* WM_EVENT_COUNTERS */
   8880 
   8881 		/*
   8882 		 * 82574 and newer's document says the status field has neither
   8883 		 * EC (Excessive Collision) bit nor LC (Late Collision) bit
   8884 		 * (reserved). Refer "PCIe GbE Controller Open Source Software
   8885 		 * Developer's Manual", 82574 datasheet and newer.
   8886 		 *
   8887 		 * XXX I saw the LC bit was set on I218 even though the media
   8888 		 * was full duplex, so the bit might be used for other
   8889 		 * meaning ...(I have no document).
   8890 		 */
   8891 
   8892 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8893 		    && ((sc->sc_type < WM_T_82574)
   8894 			|| (sc->sc_type == WM_T_80003))) {
   8895 			if_statinc(ifp, if_oerrors);
   8896 			if (status & WTX_ST_LC)
   8897 				log(LOG_WARNING, "%s: late collision\n",
   8898 				    device_xname(sc->sc_dev));
   8899 			else if (status & WTX_ST_EC) {
   8900 				if_statadd(ifp, if_collisions,
   8901 				    TX_COLLISION_THRESHOLD + 1);
   8902 				log(LOG_WARNING, "%s: excessive collisions\n",
   8903 				    device_xname(sc->sc_dev));
   8904 			}
   8905 		} else
   8906 			if_statinc(ifp, if_opackets);
   8907 
   8908 		txq->txq_packets++;
   8909 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8910 
   8911 		txq->txq_free += txs->txs_ndesc;
   8912 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8913 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8914 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8915 		m_freem(txs->txs_mbuf);
   8916 		txs->txs_mbuf = NULL;
   8917 	}
   8918 
   8919 	/* Update the dirty transmit buffer pointer. */
   8920 	txq->txq_sdirty = i;
   8921 	DPRINTF(sc, WM_DEBUG_TX,
   8922 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8923 
   8924 	if (count != 0)
   8925 		rnd_add_uint32(&sc->rnd_source, count);
   8926 
   8927 	/*
   8928 	 * If there are no more pending transmissions, cancel the watchdog
   8929 	 * timer.
   8930 	 */
   8931 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8932 		txq->txq_sending = false;
   8933 
   8934 	return more;
   8935 }
   8936 
   8937 static inline uint32_t
   8938 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8939 {
   8940 	struct wm_softc *sc = rxq->rxq_sc;
   8941 
   8942 	if (sc->sc_type == WM_T_82574)
   8943 		return EXTRXC_STATUS(
   8944 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8945 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8946 		return NQRXC_STATUS(
   8947 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8948 	else
   8949 		return rxq->rxq_descs[idx].wrx_status;
   8950 }
   8951 
   8952 static inline uint32_t
   8953 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8954 {
   8955 	struct wm_softc *sc = rxq->rxq_sc;
   8956 
   8957 	if (sc->sc_type == WM_T_82574)
   8958 		return EXTRXC_ERROR(
   8959 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8960 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8961 		return NQRXC_ERROR(
   8962 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8963 	else
   8964 		return rxq->rxq_descs[idx].wrx_errors;
   8965 }
   8966 
   8967 static inline uint16_t
   8968 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8969 {
   8970 	struct wm_softc *sc = rxq->rxq_sc;
   8971 
   8972 	if (sc->sc_type == WM_T_82574)
   8973 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8974 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8975 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8976 	else
   8977 		return rxq->rxq_descs[idx].wrx_special;
   8978 }
   8979 
   8980 static inline int
   8981 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8982 {
   8983 	struct wm_softc *sc = rxq->rxq_sc;
   8984 
   8985 	if (sc->sc_type == WM_T_82574)
   8986 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8987 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8988 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8989 	else
   8990 		return rxq->rxq_descs[idx].wrx_len;
   8991 }
   8992 
   8993 #ifdef WM_DEBUG
   8994 static inline uint32_t
   8995 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8996 {
   8997 	struct wm_softc *sc = rxq->rxq_sc;
   8998 
   8999 	if (sc->sc_type == WM_T_82574)
   9000 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9001 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9002 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9003 	else
   9004 		return 0;
   9005 }
   9006 
   9007 static inline uint8_t
   9008 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9009 {
   9010 	struct wm_softc *sc = rxq->rxq_sc;
   9011 
   9012 	if (sc->sc_type == WM_T_82574)
   9013 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9014 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9015 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9016 	else
   9017 		return 0;
   9018 }
   9019 #endif /* WM_DEBUG */
   9020 
   9021 static inline bool
   9022 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9023     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9024 {
   9025 
   9026 	if (sc->sc_type == WM_T_82574)
   9027 		return (status & ext_bit) != 0;
   9028 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9029 		return (status & nq_bit) != 0;
   9030 	else
   9031 		return (status & legacy_bit) != 0;
   9032 }
   9033 
   9034 static inline bool
   9035 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9036     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9037 {
   9038 
   9039 	if (sc->sc_type == WM_T_82574)
   9040 		return (error & ext_bit) != 0;
   9041 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9042 		return (error & nq_bit) != 0;
   9043 	else
   9044 		return (error & legacy_bit) != 0;
   9045 }
   9046 
   9047 static inline bool
   9048 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9049 {
   9050 
   9051 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9052 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9053 		return true;
   9054 	else
   9055 		return false;
   9056 }
   9057 
   9058 static inline bool
   9059 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9060 {
   9061 	struct wm_softc *sc = rxq->rxq_sc;
   9062 
   9063 	/* XXX missing error bit for newqueue? */
   9064 	if (wm_rxdesc_is_set_error(sc, errors,
   9065 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9066 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9067 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9068 		NQRXC_ERROR_RXE)) {
   9069 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9070 		    EXTRXC_ERROR_SE, 0))
   9071 			log(LOG_WARNING, "%s: symbol error\n",
   9072 			    device_xname(sc->sc_dev));
   9073 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9074 		    EXTRXC_ERROR_SEQ, 0))
   9075 			log(LOG_WARNING, "%s: receive sequence error\n",
   9076 			    device_xname(sc->sc_dev));
   9077 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9078 		    EXTRXC_ERROR_CE, 0))
   9079 			log(LOG_WARNING, "%s: CRC error\n",
   9080 			    device_xname(sc->sc_dev));
   9081 		return true;
   9082 	}
   9083 
   9084 	return false;
   9085 }
   9086 
   9087 static inline bool
   9088 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9089 {
   9090 	struct wm_softc *sc = rxq->rxq_sc;
   9091 
   9092 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9093 		NQRXC_STATUS_DD)) {
   9094 		/* We have processed all of the receive descriptors. */
   9095 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9096 		return false;
   9097 	}
   9098 
   9099 	return true;
   9100 }
   9101 
   9102 static inline bool
   9103 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9104     uint16_t vlantag, struct mbuf *m)
   9105 {
   9106 
   9107 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9108 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9109 		vlan_set_tag(m, le16toh(vlantag));
   9110 	}
   9111 
   9112 	return true;
   9113 }
   9114 
   9115 static inline void
   9116 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9117     uint32_t errors, struct mbuf *m)
   9118 {
   9119 	struct wm_softc *sc = rxq->rxq_sc;
   9120 
   9121 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9122 		if (wm_rxdesc_is_set_status(sc, status,
   9123 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9124 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9125 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9126 			if (wm_rxdesc_is_set_error(sc, errors,
   9127 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9128 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9129 		}
   9130 		if (wm_rxdesc_is_set_status(sc, status,
   9131 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9132 			/*
   9133 			 * Note: we don't know if this was TCP or UDP,
   9134 			 * so we just set both bits, and expect the
   9135 			 * upper layers to deal.
   9136 			 */
   9137 			WM_Q_EVCNT_INCR(rxq, tusum);
   9138 			m->m_pkthdr.csum_flags |=
   9139 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9140 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9141 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9142 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9143 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9144 		}
   9145 	}
   9146 }
   9147 
   9148 /*
   9149  * wm_rxeof:
   9150  *
   9151  *	Helper; handle receive interrupts.
   9152  */
   9153 static bool
   9154 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9155 {
   9156 	struct wm_softc *sc = rxq->rxq_sc;
   9157 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9158 	struct wm_rxsoft *rxs;
   9159 	struct mbuf *m;
   9160 	int i, len;
   9161 	int count = 0;
   9162 	uint32_t status, errors;
   9163 	uint16_t vlantag;
   9164 	bool more = false;
   9165 
   9166 	KASSERT(mutex_owned(rxq->rxq_lock));
   9167 
   9168 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9169 		if (limit-- == 0) {
   9170 			rxq->rxq_ptr = i;
   9171 			more = true;
   9172 			DPRINTF(sc, WM_DEBUG_RX,
   9173 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9174 				device_xname(sc->sc_dev), i));
   9175 			break;
   9176 		}
   9177 
   9178 		rxs = &rxq->rxq_soft[i];
   9179 
   9180 		DPRINTF(sc, WM_DEBUG_RX,
   9181 		    ("%s: RX: checking descriptor %d\n",
   9182 			device_xname(sc->sc_dev), i));
   9183 		wm_cdrxsync(rxq, i,
   9184 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9185 
   9186 		status = wm_rxdesc_get_status(rxq, i);
   9187 		errors = wm_rxdesc_get_errors(rxq, i);
   9188 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9189 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9190 #ifdef WM_DEBUG
   9191 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9192 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9193 #endif
   9194 
   9195 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9196 			/*
   9197 			 * Update the receive pointer holding rxq_lock
   9198 			 * consistent with increment counter.
   9199 			 */
   9200 			rxq->rxq_ptr = i;
   9201 			break;
   9202 		}
   9203 
   9204 		count++;
   9205 		if (__predict_false(rxq->rxq_discard)) {
   9206 			DPRINTF(sc, WM_DEBUG_RX,
   9207 			    ("%s: RX: discarding contents of descriptor %d\n",
   9208 				device_xname(sc->sc_dev), i));
   9209 			wm_init_rxdesc(rxq, i);
   9210 			if (wm_rxdesc_is_eop(rxq, status)) {
   9211 				/* Reset our state. */
   9212 				DPRINTF(sc, WM_DEBUG_RX,
   9213 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9214 					device_xname(sc->sc_dev)));
   9215 				rxq->rxq_discard = 0;
   9216 			}
   9217 			continue;
   9218 		}
   9219 
   9220 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9221 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9222 
   9223 		m = rxs->rxs_mbuf;
   9224 
   9225 		/*
   9226 		 * Add a new receive buffer to the ring, unless of
   9227 		 * course the length is zero. Treat the latter as a
   9228 		 * failed mapping.
   9229 		 */
   9230 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9231 			/*
   9232 			 * Failed, throw away what we've done so
   9233 			 * far, and discard the rest of the packet.
   9234 			 */
   9235 			if_statinc(ifp, if_ierrors);
   9236 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9237 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9238 			wm_init_rxdesc(rxq, i);
   9239 			if (!wm_rxdesc_is_eop(rxq, status))
   9240 				rxq->rxq_discard = 1;
   9241 			if (rxq->rxq_head != NULL)
   9242 				m_freem(rxq->rxq_head);
   9243 			WM_RXCHAIN_RESET(rxq);
   9244 			DPRINTF(sc, WM_DEBUG_RX,
   9245 			    ("%s: RX: Rx buffer allocation failed, "
   9246 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9247 				rxq->rxq_discard ? " (discard)" : ""));
   9248 			continue;
   9249 		}
   9250 
   9251 		m->m_len = len;
   9252 		rxq->rxq_len += len;
   9253 		DPRINTF(sc, WM_DEBUG_RX,
   9254 		    ("%s: RX: buffer at %p len %d\n",
   9255 			device_xname(sc->sc_dev), m->m_data, len));
   9256 
   9257 		/* If this is not the end of the packet, keep looking. */
   9258 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9259 			WM_RXCHAIN_LINK(rxq, m);
   9260 			DPRINTF(sc, WM_DEBUG_RX,
   9261 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9262 				device_xname(sc->sc_dev), rxq->rxq_len));
   9263 			continue;
   9264 		}
   9265 
   9266 		/*
   9267 		 * Okay, we have the entire packet now. The chip is
   9268 		 * configured to include the FCS except I35[04], I21[01].
   9269 		 * (not all chips can be configured to strip it), so we need
   9270 		 * to trim it. Those chips have an eratta, the RCTL_SECRC bit
   9271 		 * in RCTL register is always set, so we don't trim it.
   9272 		 * PCH2 and newer chip also not include FCS when jumbo
   9273 		 * frame is used to do workaround an errata.
   9274 		 * May need to adjust length of previous mbuf in the
   9275 		 * chain if the current mbuf is too short.
   9276 		 */
   9277 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9278 			if (m->m_len < ETHER_CRC_LEN) {
   9279 				rxq->rxq_tail->m_len
   9280 				    -= (ETHER_CRC_LEN - m->m_len);
   9281 				m->m_len = 0;
   9282 			} else
   9283 				m->m_len -= ETHER_CRC_LEN;
   9284 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9285 		} else
   9286 			len = rxq->rxq_len;
   9287 
   9288 		WM_RXCHAIN_LINK(rxq, m);
   9289 
   9290 		*rxq->rxq_tailp = NULL;
   9291 		m = rxq->rxq_head;
   9292 
   9293 		WM_RXCHAIN_RESET(rxq);
   9294 
   9295 		DPRINTF(sc, WM_DEBUG_RX,
   9296 		    ("%s: RX: have entire packet, len -> %d\n",
   9297 			device_xname(sc->sc_dev), len));
   9298 
   9299 		/* If an error occurred, update stats and drop the packet. */
   9300 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9301 			m_freem(m);
   9302 			continue;
   9303 		}
   9304 
   9305 		/* No errors.  Receive the packet. */
   9306 		m_set_rcvif(m, ifp);
   9307 		m->m_pkthdr.len = len;
   9308 		/*
   9309 		 * TODO
   9310 		 * should be save rsshash and rsstype to this mbuf.
   9311 		 */
   9312 		DPRINTF(sc, WM_DEBUG_RX,
   9313 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9314 			device_xname(sc->sc_dev), rsstype, rsshash));
   9315 
   9316 		/*
   9317 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9318 		 * for us.  Associate the tag with the packet.
   9319 		 */
   9320 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9321 			continue;
   9322 
   9323 		/* Set up checksum info for this packet. */
   9324 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9325 		/*
   9326 		 * Update the receive pointer holding rxq_lock consistent with
   9327 		 * increment counter.
   9328 		 */
   9329 		rxq->rxq_ptr = i;
   9330 		rxq->rxq_packets++;
   9331 		rxq->rxq_bytes += len;
   9332 		mutex_exit(rxq->rxq_lock);
   9333 
   9334 		/* Pass it on. */
   9335 		if_percpuq_enqueue(sc->sc_ipq, m);
   9336 
   9337 		mutex_enter(rxq->rxq_lock);
   9338 
   9339 		if (rxq->rxq_stopping)
   9340 			break;
   9341 	}
   9342 
   9343 	if (count != 0)
   9344 		rnd_add_uint32(&sc->rnd_source, count);
   9345 
   9346 	DPRINTF(sc, WM_DEBUG_RX,
   9347 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9348 
   9349 	return more;
   9350 }
   9351 
   9352 /*
   9353  * wm_linkintr_gmii:
   9354  *
   9355  *	Helper; handle link interrupts for GMII.
   9356  */
   9357 static void
   9358 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9359 {
   9360 	device_t dev = sc->sc_dev;
   9361 	uint32_t status, reg;
   9362 	bool link;
   9363 	int rv;
   9364 
   9365 	KASSERT(WM_CORE_LOCKED(sc));
   9366 
   9367 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9368 		__func__));
   9369 
   9370 	if ((icr & ICR_LSC) == 0) {
   9371 		if (icr & ICR_RXSEQ)
   9372 			DPRINTF(sc, WM_DEBUG_LINK,
   9373 			    ("%s: LINK Receive sequence error\n",
   9374 				device_xname(dev)));
   9375 		return;
   9376 	}
   9377 
   9378 	/* Link status changed */
   9379 	status = CSR_READ(sc, WMREG_STATUS);
   9380 	link = status & STATUS_LU;
   9381 	if (link) {
   9382 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9383 			device_xname(dev),
   9384 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9385 		if (wm_phy_need_linkdown_discard(sc))
   9386 			wm_clear_linkdown_discard(sc);
   9387 	} else {
   9388 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9389 			device_xname(dev)));
   9390 		if (wm_phy_need_linkdown_discard(sc))
   9391 			wm_set_linkdown_discard(sc);
   9392 	}
   9393 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9394 		wm_gig_downshift_workaround_ich8lan(sc);
   9395 
   9396 	if ((sc->sc_type == WM_T_ICH8)
   9397 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9398 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9399 	}
   9400 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9401 		device_xname(dev)));
   9402 	mii_pollstat(&sc->sc_mii);
   9403 	if (sc->sc_type == WM_T_82543) {
   9404 		int miistatus, active;
   9405 
   9406 		/*
   9407 		 * With 82543, we need to force speed and
   9408 		 * duplex on the MAC equal to what the PHY
   9409 		 * speed and duplex configuration is.
   9410 		 */
   9411 		miistatus = sc->sc_mii.mii_media_status;
   9412 
   9413 		if (miistatus & IFM_ACTIVE) {
   9414 			active = sc->sc_mii.mii_media_active;
   9415 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9416 			switch (IFM_SUBTYPE(active)) {
   9417 			case IFM_10_T:
   9418 				sc->sc_ctrl |= CTRL_SPEED_10;
   9419 				break;
   9420 			case IFM_100_TX:
   9421 				sc->sc_ctrl |= CTRL_SPEED_100;
   9422 				break;
   9423 			case IFM_1000_T:
   9424 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9425 				break;
   9426 			default:
   9427 				/*
   9428 				 * Fiber?
   9429 				 * Shoud not enter here.
   9430 				 */
   9431 				device_printf(dev, "unknown media (%x)\n",
   9432 				    active);
   9433 				break;
   9434 			}
   9435 			if (active & IFM_FDX)
   9436 				sc->sc_ctrl |= CTRL_FD;
   9437 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9438 		}
   9439 	} else if (sc->sc_type == WM_T_PCH) {
   9440 		wm_k1_gig_workaround_hv(sc,
   9441 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9442 	}
   9443 
   9444 	/*
   9445 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9446 	 * aggressive resulting in many collisions. To avoid this, increase
   9447 	 * the IPG and reduce Rx latency in the PHY.
   9448 	 */
   9449 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9450 	    && link) {
   9451 		uint32_t tipg_reg;
   9452 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9453 		bool fdx;
   9454 		uint16_t emi_addr, emi_val;
   9455 
   9456 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9457 		tipg_reg &= ~TIPG_IPGT_MASK;
   9458 		fdx = status & STATUS_FD;
   9459 
   9460 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9461 			tipg_reg |= 0xff;
   9462 			/* Reduce Rx latency in analog PHY */
   9463 			emi_val = 0;
   9464 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9465 		    fdx && speed != STATUS_SPEED_1000) {
   9466 			tipg_reg |= 0xc;
   9467 			emi_val = 1;
   9468 		} else {
   9469 			/* Roll back the default values */
   9470 			tipg_reg |= 0x08;
   9471 			emi_val = 1;
   9472 		}
   9473 
   9474 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9475 
   9476 		rv = sc->phy.acquire(sc);
   9477 		if (rv)
   9478 			return;
   9479 
   9480 		if (sc->sc_type == WM_T_PCH2)
   9481 			emi_addr = I82579_RX_CONFIG;
   9482 		else
   9483 			emi_addr = I217_RX_CONFIG;
   9484 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9485 
   9486 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9487 			uint16_t phy_reg;
   9488 
   9489 			sc->phy.readreg_locked(dev, 2,
   9490 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9491 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9492 			if (speed == STATUS_SPEED_100
   9493 			    || speed == STATUS_SPEED_10)
   9494 				phy_reg |= 0x3e8;
   9495 			else
   9496 				phy_reg |= 0xfa;
   9497 			sc->phy.writereg_locked(dev, 2,
   9498 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9499 
   9500 			if (speed == STATUS_SPEED_1000) {
   9501 				sc->phy.readreg_locked(dev, 2,
   9502 				    HV_PM_CTRL, &phy_reg);
   9503 
   9504 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9505 
   9506 				sc->phy.writereg_locked(dev, 2,
   9507 				    HV_PM_CTRL, phy_reg);
   9508 			}
   9509 		}
   9510 		sc->phy.release(sc);
   9511 
   9512 		if (rv)
   9513 			return;
   9514 
   9515 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9516 			uint16_t data, ptr_gap;
   9517 
   9518 			if (speed == STATUS_SPEED_1000) {
   9519 				rv = sc->phy.acquire(sc);
   9520 				if (rv)
   9521 					return;
   9522 
   9523 				rv = sc->phy.readreg_locked(dev, 2,
   9524 				    I82579_UNKNOWN1, &data);
   9525 				if (rv) {
   9526 					sc->phy.release(sc);
   9527 					return;
   9528 				}
   9529 
   9530 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9531 				if (ptr_gap < 0x18) {
   9532 					data &= ~(0x3ff << 2);
   9533 					data |= (0x18 << 2);
   9534 					rv = sc->phy.writereg_locked(dev,
   9535 					    2, I82579_UNKNOWN1, data);
   9536 				}
   9537 				sc->phy.release(sc);
   9538 				if (rv)
   9539 					return;
   9540 			} else {
   9541 				rv = sc->phy.acquire(sc);
   9542 				if (rv)
   9543 					return;
   9544 
   9545 				rv = sc->phy.writereg_locked(dev, 2,
   9546 				    I82579_UNKNOWN1, 0xc023);
   9547 				sc->phy.release(sc);
   9548 				if (rv)
   9549 					return;
   9550 
   9551 			}
   9552 		}
   9553 	}
   9554 
   9555 	/*
   9556 	 * I217 Packet Loss issue:
   9557 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9558 	 * on power up.
   9559 	 * Set the Beacon Duration for I217 to 8 usec
   9560 	 */
   9561 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9562 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9563 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9564 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9565 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9566 	}
   9567 
   9568 	/* Work-around I218 hang issue */
   9569 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9570 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9571 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9572 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9573 		wm_k1_workaround_lpt_lp(sc, link);
   9574 
   9575 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9576 		/*
   9577 		 * Set platform power management values for Latency
   9578 		 * Tolerance Reporting (LTR)
   9579 		 */
   9580 		wm_platform_pm_pch_lpt(sc,
   9581 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9582 	}
   9583 
   9584 	/* Clear link partner's EEE ability */
   9585 	sc->eee_lp_ability = 0;
   9586 
   9587 	/* FEXTNVM6 K1-off workaround */
   9588 	if (sc->sc_type == WM_T_PCH_SPT) {
   9589 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9590 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9591 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9592 		else
   9593 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9594 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9595 	}
   9596 
   9597 	if (!link)
   9598 		return;
   9599 
   9600 	switch (sc->sc_type) {
   9601 	case WM_T_PCH2:
   9602 		wm_k1_workaround_lv(sc);
   9603 		/* FALLTHROUGH */
   9604 	case WM_T_PCH:
   9605 		if (sc->sc_phytype == WMPHY_82578)
   9606 			wm_link_stall_workaround_hv(sc);
   9607 		break;
   9608 	default:
   9609 		break;
   9610 	}
   9611 
   9612 	/* Enable/Disable EEE after link up */
   9613 	if (sc->sc_phytype > WMPHY_82579)
   9614 		wm_set_eee_pchlan(sc);
   9615 }
   9616 
   9617 /*
   9618  * wm_linkintr_tbi:
   9619  *
   9620  *	Helper; handle link interrupts for TBI mode.
   9621  */
   9622 static void
   9623 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9624 {
   9625 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9626 	uint32_t status;
   9627 
   9628 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9629 		__func__));
   9630 
   9631 	status = CSR_READ(sc, WMREG_STATUS);
   9632 	if (icr & ICR_LSC) {
   9633 		wm_check_for_link(sc);
   9634 		if (status & STATUS_LU) {
   9635 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9636 				device_xname(sc->sc_dev),
   9637 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9638 			/*
   9639 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9640 			 * so we should update sc->sc_ctrl
   9641 			 */
   9642 
   9643 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9644 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9645 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9646 			if (status & STATUS_FD)
   9647 				sc->sc_tctl |=
   9648 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9649 			else
   9650 				sc->sc_tctl |=
   9651 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9652 			if (sc->sc_ctrl & CTRL_TFCE)
   9653 				sc->sc_fcrtl |= FCRTL_XONE;
   9654 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9655 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9656 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9657 			sc->sc_tbi_linkup = 1;
   9658 			if_link_state_change(ifp, LINK_STATE_UP);
   9659 		} else {
   9660 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9661 				device_xname(sc->sc_dev)));
   9662 			sc->sc_tbi_linkup = 0;
   9663 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9664 		}
   9665 		/* Update LED */
   9666 		wm_tbi_serdes_set_linkled(sc);
   9667 	} else if (icr & ICR_RXSEQ)
   9668 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9669 			device_xname(sc->sc_dev)));
   9670 }
   9671 
   9672 /*
   9673  * wm_linkintr_serdes:
   9674  *
   9675  *	Helper; handle link interrupts for TBI mode.
   9676  */
   9677 static void
   9678 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9679 {
   9680 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9681 	struct mii_data *mii = &sc->sc_mii;
   9682 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9683 	uint32_t pcs_adv, pcs_lpab, reg;
   9684 
   9685 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9686 		__func__));
   9687 
   9688 	if (icr & ICR_LSC) {
   9689 		/* Check PCS */
   9690 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9691 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9692 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9693 				device_xname(sc->sc_dev)));
   9694 			mii->mii_media_status |= IFM_ACTIVE;
   9695 			sc->sc_tbi_linkup = 1;
   9696 			if_link_state_change(ifp, LINK_STATE_UP);
   9697 		} else {
   9698 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9699 				device_xname(sc->sc_dev)));
   9700 			mii->mii_media_status |= IFM_NONE;
   9701 			sc->sc_tbi_linkup = 0;
   9702 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9703 			wm_tbi_serdes_set_linkled(sc);
   9704 			return;
   9705 		}
   9706 		mii->mii_media_active |= IFM_1000_SX;
   9707 		if ((reg & PCS_LSTS_FDX) != 0)
   9708 			mii->mii_media_active |= IFM_FDX;
   9709 		else
   9710 			mii->mii_media_active |= IFM_HDX;
   9711 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9712 			/* Check flow */
   9713 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9714 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9715 				DPRINTF(sc, WM_DEBUG_LINK,
   9716 				    ("XXX LINKOK but not ACOMP\n"));
   9717 				return;
   9718 			}
   9719 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9720 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9721 			DPRINTF(sc, WM_DEBUG_LINK,
   9722 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9723 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9724 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9725 				mii->mii_media_active |= IFM_FLOW
   9726 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9727 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9728 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9729 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9730 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9731 				mii->mii_media_active |= IFM_FLOW
   9732 				    | IFM_ETH_TXPAUSE;
   9733 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9734 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9735 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9736 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9737 				mii->mii_media_active |= IFM_FLOW
   9738 				    | IFM_ETH_RXPAUSE;
   9739 		}
   9740 		/* Update LED */
   9741 		wm_tbi_serdes_set_linkled(sc);
   9742 	} else
   9743 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9744 		    device_xname(sc->sc_dev)));
   9745 }
   9746 
   9747 /*
   9748  * wm_linkintr:
   9749  *
   9750  *	Helper; handle link interrupts.
   9751  */
   9752 static void
   9753 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9754 {
   9755 
   9756 	KASSERT(WM_CORE_LOCKED(sc));
   9757 
   9758 	if (sc->sc_flags & WM_F_HAS_MII)
   9759 		wm_linkintr_gmii(sc, icr);
   9760 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9761 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9762 		wm_linkintr_serdes(sc, icr);
   9763 	else
   9764 		wm_linkintr_tbi(sc, icr);
   9765 }
   9766 
   9767 
   9768 static inline void
   9769 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9770 {
   9771 
   9772 	if (wmq->wmq_txrx_use_workqueue)
   9773 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9774 	else
   9775 		softint_schedule(wmq->wmq_si);
   9776 }
   9777 
   9778 /*
   9779  * wm_intr_legacy:
   9780  *
   9781  *	Interrupt service routine for INTx and MSI.
   9782  */
   9783 static int
   9784 wm_intr_legacy(void *arg)
   9785 {
   9786 	struct wm_softc *sc = arg;
   9787 	struct wm_queue *wmq = &sc->sc_queue[0];
   9788 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9789 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9790 	uint32_t icr, rndval = 0;
   9791 	int handled = 0;
   9792 
   9793 	while (1 /* CONSTCOND */) {
   9794 		icr = CSR_READ(sc, WMREG_ICR);
   9795 		if ((icr & sc->sc_icr) == 0)
   9796 			break;
   9797 		if (handled == 0)
   9798 			DPRINTF(sc, WM_DEBUG_TX,
   9799 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9800 		if (rndval == 0)
   9801 			rndval = icr;
   9802 
   9803 		mutex_enter(rxq->rxq_lock);
   9804 
   9805 		if (rxq->rxq_stopping) {
   9806 			mutex_exit(rxq->rxq_lock);
   9807 			break;
   9808 		}
   9809 
   9810 		handled = 1;
   9811 
   9812 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9813 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9814 			DPRINTF(sc, WM_DEBUG_RX,
   9815 			    ("%s: RX: got Rx intr 0x%08x\n",
   9816 				device_xname(sc->sc_dev),
   9817 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9818 			WM_Q_EVCNT_INCR(rxq, intr);
   9819 		}
   9820 #endif
   9821 		/*
   9822 		 * wm_rxeof() does *not* call upper layer functions directly,
   9823 		 * as if_percpuq_enqueue() just call softint_schedule().
   9824 		 * So, we can call wm_rxeof() in interrupt context.
   9825 		 */
   9826 		wm_rxeof(rxq, UINT_MAX);
   9827 
   9828 		mutex_exit(rxq->rxq_lock);
   9829 		mutex_enter(txq->txq_lock);
   9830 
   9831 		if (txq->txq_stopping) {
   9832 			mutex_exit(txq->txq_lock);
   9833 			break;
   9834 		}
   9835 
   9836 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9837 		if (icr & ICR_TXDW) {
   9838 			DPRINTF(sc, WM_DEBUG_TX,
   9839 			    ("%s: TX: got TXDW interrupt\n",
   9840 				device_xname(sc->sc_dev)));
   9841 			WM_Q_EVCNT_INCR(txq, txdw);
   9842 		}
   9843 #endif
   9844 		wm_txeof(txq, UINT_MAX);
   9845 
   9846 		mutex_exit(txq->txq_lock);
   9847 		WM_CORE_LOCK(sc);
   9848 
   9849 		if (sc->sc_core_stopping) {
   9850 			WM_CORE_UNLOCK(sc);
   9851 			break;
   9852 		}
   9853 
   9854 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9855 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9856 			wm_linkintr(sc, icr);
   9857 		}
   9858 		if ((icr & ICR_GPI(0)) != 0)
   9859 			device_printf(sc->sc_dev, "got module interrupt\n");
   9860 
   9861 		WM_CORE_UNLOCK(sc);
   9862 
   9863 		if (icr & ICR_RXO) {
   9864 #if defined(WM_DEBUG)
   9865 			log(LOG_WARNING, "%s: Receive overrun\n",
   9866 			    device_xname(sc->sc_dev));
   9867 #endif /* defined(WM_DEBUG) */
   9868 		}
   9869 	}
   9870 
   9871 	rnd_add_uint32(&sc->rnd_source, rndval);
   9872 
   9873 	if (handled) {
   9874 		/* Try to get more packets going. */
   9875 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9876 		wm_sched_handle_queue(sc, wmq);
   9877 	}
   9878 
   9879 	return handled;
   9880 }
   9881 
   9882 static inline void
   9883 wm_txrxintr_disable(struct wm_queue *wmq)
   9884 {
   9885 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9886 
   9887 	if (sc->sc_type == WM_T_82574)
   9888 		CSR_WRITE(sc, WMREG_IMC,
   9889 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9890 	else if (sc->sc_type == WM_T_82575)
   9891 		CSR_WRITE(sc, WMREG_EIMC,
   9892 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9893 	else
   9894 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9895 }
   9896 
   9897 static inline void
   9898 wm_txrxintr_enable(struct wm_queue *wmq)
   9899 {
   9900 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9901 
   9902 	wm_itrs_calculate(sc, wmq);
   9903 
   9904 	/*
   9905 	 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here.
   9906 	 * There is no need to care about which of RXQ(0) and RXQ(1) enable
   9907 	 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled
   9908 	 * while each wm_handle_queue(wmq) is runnig.
   9909 	 */
   9910 	if (sc->sc_type == WM_T_82574)
   9911 		CSR_WRITE(sc, WMREG_IMS,
   9912 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9913 	else if (sc->sc_type == WM_T_82575)
   9914 		CSR_WRITE(sc, WMREG_EIMS,
   9915 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9916 	else
   9917 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9918 }
   9919 
   9920 static int
   9921 wm_txrxintr_msix(void *arg)
   9922 {
   9923 	struct wm_queue *wmq = arg;
   9924 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9925 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9926 	struct wm_softc *sc = txq->txq_sc;
   9927 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9928 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9929 	bool txmore;
   9930 	bool rxmore;
   9931 
   9932 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9933 
   9934 	DPRINTF(sc, WM_DEBUG_TX,
   9935 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9936 
   9937 	wm_txrxintr_disable(wmq);
   9938 
   9939 	mutex_enter(txq->txq_lock);
   9940 
   9941 	if (txq->txq_stopping) {
   9942 		mutex_exit(txq->txq_lock);
   9943 		return 0;
   9944 	}
   9945 
   9946 	WM_Q_EVCNT_INCR(txq, txdw);
   9947 	txmore = wm_txeof(txq, txlimit);
   9948 	/* wm_deferred start() is done in wm_handle_queue(). */
   9949 	mutex_exit(txq->txq_lock);
   9950 
   9951 	DPRINTF(sc, WM_DEBUG_RX,
   9952 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9953 	mutex_enter(rxq->rxq_lock);
   9954 
   9955 	if (rxq->rxq_stopping) {
   9956 		mutex_exit(rxq->rxq_lock);
   9957 		return 0;
   9958 	}
   9959 
   9960 	WM_Q_EVCNT_INCR(rxq, intr);
   9961 	rxmore = wm_rxeof(rxq, rxlimit);
   9962 	mutex_exit(rxq->rxq_lock);
   9963 
   9964 	wm_itrs_writereg(sc, wmq);
   9965 
   9966 	if (txmore || rxmore) {
   9967 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9968 		wm_sched_handle_queue(sc, wmq);
   9969 	} else
   9970 		wm_txrxintr_enable(wmq);
   9971 
   9972 	return 1;
   9973 }
   9974 
   9975 static void
   9976 wm_handle_queue(void *arg)
   9977 {
   9978 	struct wm_queue *wmq = arg;
   9979 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9980 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9981 	struct wm_softc *sc = txq->txq_sc;
   9982 	u_int txlimit = sc->sc_tx_process_limit;
   9983 	u_int rxlimit = sc->sc_rx_process_limit;
   9984 	bool txmore;
   9985 	bool rxmore;
   9986 
   9987 	mutex_enter(txq->txq_lock);
   9988 	if (txq->txq_stopping) {
   9989 		mutex_exit(txq->txq_lock);
   9990 		return;
   9991 	}
   9992 	txmore = wm_txeof(txq, txlimit);
   9993 	wm_deferred_start_locked(txq);
   9994 	mutex_exit(txq->txq_lock);
   9995 
   9996 	mutex_enter(rxq->rxq_lock);
   9997 	if (rxq->rxq_stopping) {
   9998 		mutex_exit(rxq->rxq_lock);
   9999 		return;
   10000 	}
   10001 	WM_Q_EVCNT_INCR(rxq, defer);
   10002 	rxmore = wm_rxeof(rxq, rxlimit);
   10003 	mutex_exit(rxq->rxq_lock);
   10004 
   10005 	if (txmore || rxmore) {
   10006 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10007 		wm_sched_handle_queue(sc, wmq);
   10008 	} else
   10009 		wm_txrxintr_enable(wmq);
   10010 }
   10011 
   10012 static void
   10013 wm_handle_queue_work(struct work *wk, void *context)
   10014 {
   10015 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10016 
   10017 	/*
   10018 	 * "enqueued flag" is not required here.
   10019 	 */
   10020 	wm_handle_queue(wmq);
   10021 }
   10022 
   10023 /*
   10024  * wm_linkintr_msix:
   10025  *
   10026  *	Interrupt service routine for link status change for MSI-X.
   10027  */
   10028 static int
   10029 wm_linkintr_msix(void *arg)
   10030 {
   10031 	struct wm_softc *sc = arg;
   10032 	uint32_t reg;
   10033 	bool has_rxo;
   10034 
   10035 	reg = CSR_READ(sc, WMREG_ICR);
   10036 	WM_CORE_LOCK(sc);
   10037 	DPRINTF(sc, WM_DEBUG_LINK,
   10038 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10039 		device_xname(sc->sc_dev), reg));
   10040 
   10041 	if (sc->sc_core_stopping)
   10042 		goto out;
   10043 
   10044 	if ((reg & ICR_LSC) != 0) {
   10045 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10046 		wm_linkintr(sc, ICR_LSC);
   10047 	}
   10048 	if ((reg & ICR_GPI(0)) != 0)
   10049 		device_printf(sc->sc_dev, "got module interrupt\n");
   10050 
   10051 	/*
   10052 	 * XXX 82574 MSI-X mode workaround
   10053 	 *
   10054 	 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
   10055 	 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor
   10056 	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
   10057 	 * interrupts by writing WMREG_ICS to process receive packets.
   10058 	 */
   10059 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10060 #if defined(WM_DEBUG)
   10061 		log(LOG_WARNING, "%s: Receive overrun\n",
   10062 		    device_xname(sc->sc_dev));
   10063 #endif /* defined(WM_DEBUG) */
   10064 
   10065 		has_rxo = true;
   10066 		/*
   10067 		 * The RXO interrupt is very high rate when receive traffic is
   10068 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   10069 		 * interrupts. ICR_OTHER will be enabled at the end of
   10070 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   10071 		 * ICR_RXQ(1) interrupts.
   10072 		 */
   10073 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10074 
   10075 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10076 	}
   10077 
   10078 
   10079 
   10080 out:
   10081 	WM_CORE_UNLOCK(sc);
   10082 
   10083 	if (sc->sc_type == WM_T_82574) {
   10084 		if (!has_rxo)
   10085 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10086 		else
   10087 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10088 	} else if (sc->sc_type == WM_T_82575)
   10089 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10090 	else
   10091 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10092 
   10093 	return 1;
   10094 }
   10095 
   10096 /*
   10097  * Media related.
   10098  * GMII, SGMII, TBI (and SERDES)
   10099  */
   10100 
   10101 /* Common */
   10102 
   10103 /*
   10104  * wm_tbi_serdes_set_linkled:
   10105  *
   10106  *	Update the link LED on TBI and SERDES devices.
   10107  */
   10108 static void
   10109 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10110 {
   10111 
   10112 	if (sc->sc_tbi_linkup)
   10113 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10114 	else
   10115 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10116 
   10117 	/* 82540 or newer devices are active low */
   10118 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10119 
   10120 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10121 }
   10122 
   10123 /* GMII related */
   10124 
   10125 /*
   10126  * wm_gmii_reset:
   10127  *
   10128  *	Reset the PHY.
   10129  */
   10130 static void
   10131 wm_gmii_reset(struct wm_softc *sc)
   10132 {
   10133 	uint32_t reg;
   10134 	int rv;
   10135 
   10136 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10137 		device_xname(sc->sc_dev), __func__));
   10138 
   10139 	rv = sc->phy.acquire(sc);
   10140 	if (rv != 0) {
   10141 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10142 		    __func__);
   10143 		return;
   10144 	}
   10145 
   10146 	switch (sc->sc_type) {
   10147 	case WM_T_82542_2_0:
   10148 	case WM_T_82542_2_1:
   10149 		/* null */
   10150 		break;
   10151 	case WM_T_82543:
   10152 		/*
   10153 		 * With 82543, we need to force speed and duplex on the MAC
   10154 		 * equal to what the PHY speed and duplex configuration is.
   10155 		 * In addition, we need to perform a hardware reset on the PHY
   10156 		 * to take it out of reset.
   10157 		 */
   10158 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10159 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10160 
   10161 		/* The PHY reset pin is active-low. */
   10162 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10163 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10164 		    CTRL_EXT_SWDPIN(4));
   10165 		reg |= CTRL_EXT_SWDPIO(4);
   10166 
   10167 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10168 		CSR_WRITE_FLUSH(sc);
   10169 		delay(10*1000);
   10170 
   10171 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10172 		CSR_WRITE_FLUSH(sc);
   10173 		delay(150);
   10174 #if 0
   10175 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10176 #endif
   10177 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10178 		break;
   10179 	case WM_T_82544:	/* Reset 10000us */
   10180 	case WM_T_82540:
   10181 	case WM_T_82545:
   10182 	case WM_T_82545_3:
   10183 	case WM_T_82546:
   10184 	case WM_T_82546_3:
   10185 	case WM_T_82541:
   10186 	case WM_T_82541_2:
   10187 	case WM_T_82547:
   10188 	case WM_T_82547_2:
   10189 	case WM_T_82571:	/* Reset 100us */
   10190 	case WM_T_82572:
   10191 	case WM_T_82573:
   10192 	case WM_T_82574:
   10193 	case WM_T_82575:
   10194 	case WM_T_82576:
   10195 	case WM_T_82580:
   10196 	case WM_T_I350:
   10197 	case WM_T_I354:
   10198 	case WM_T_I210:
   10199 	case WM_T_I211:
   10200 	case WM_T_82583:
   10201 	case WM_T_80003:
   10202 		/* Generic reset */
   10203 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10204 		CSR_WRITE_FLUSH(sc);
   10205 		delay(20000);
   10206 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10207 		CSR_WRITE_FLUSH(sc);
   10208 		delay(20000);
   10209 
   10210 		if ((sc->sc_type == WM_T_82541)
   10211 		    || (sc->sc_type == WM_T_82541_2)
   10212 		    || (sc->sc_type == WM_T_82547)
   10213 		    || (sc->sc_type == WM_T_82547_2)) {
   10214 			/* Workaround for igp are done in igp_reset() */
   10215 			/* XXX add code to set LED after phy reset */
   10216 		}
   10217 		break;
   10218 	case WM_T_ICH8:
   10219 	case WM_T_ICH9:
   10220 	case WM_T_ICH10:
   10221 	case WM_T_PCH:
   10222 	case WM_T_PCH2:
   10223 	case WM_T_PCH_LPT:
   10224 	case WM_T_PCH_SPT:
   10225 	case WM_T_PCH_CNP:
   10226 		/* Generic reset */
   10227 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10228 		CSR_WRITE_FLUSH(sc);
   10229 		delay(100);
   10230 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10231 		CSR_WRITE_FLUSH(sc);
   10232 		delay(150);
   10233 		break;
   10234 	default:
   10235 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10236 		    __func__);
   10237 		break;
   10238 	}
   10239 
   10240 	sc->phy.release(sc);
   10241 
   10242 	/* get_cfg_done */
   10243 	wm_get_cfg_done(sc);
   10244 
   10245 	/* Extra setup */
   10246 	switch (sc->sc_type) {
   10247 	case WM_T_82542_2_0:
   10248 	case WM_T_82542_2_1:
   10249 	case WM_T_82543:
   10250 	case WM_T_82544:
   10251 	case WM_T_82540:
   10252 	case WM_T_82545:
   10253 	case WM_T_82545_3:
   10254 	case WM_T_82546:
   10255 	case WM_T_82546_3:
   10256 	case WM_T_82541_2:
   10257 	case WM_T_82547_2:
   10258 	case WM_T_82571:
   10259 	case WM_T_82572:
   10260 	case WM_T_82573:
   10261 	case WM_T_82574:
   10262 	case WM_T_82583:
   10263 	case WM_T_82575:
   10264 	case WM_T_82576:
   10265 	case WM_T_82580:
   10266 	case WM_T_I350:
   10267 	case WM_T_I354:
   10268 	case WM_T_I210:
   10269 	case WM_T_I211:
   10270 	case WM_T_80003:
   10271 		/* Null */
   10272 		break;
   10273 	case WM_T_82541:
   10274 	case WM_T_82547:
   10275 		/* XXX Configure actively LED after PHY reset */
   10276 		break;
   10277 	case WM_T_ICH8:
   10278 	case WM_T_ICH9:
   10279 	case WM_T_ICH10:
   10280 	case WM_T_PCH:
   10281 	case WM_T_PCH2:
   10282 	case WM_T_PCH_LPT:
   10283 	case WM_T_PCH_SPT:
   10284 	case WM_T_PCH_CNP:
   10285 		wm_phy_post_reset(sc);
   10286 		break;
   10287 	default:
   10288 		panic("%s: unknown type\n", __func__);
   10289 		break;
   10290 	}
   10291 }
   10292 
   10293 /*
   10294  * Setup sc_phytype and mii_{read|write}reg.
   10295  *
   10296  *  To identify PHY type, correct read/write function should be selected.
   10297  * To select correct read/write function, PCI ID or MAC type are required
   10298  * without accessing PHY registers.
   10299  *
   10300  *  On the first call of this function, PHY ID is not known yet. Check
   10301  * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
   10302  * result might be incorrect.
   10303  *
   10304  *  In the second call, PHY OUI and model is used to identify PHY type.
   10305  * It might not be perfect because of the lack of compared entry, but it
   10306  * would be better than the first call.
   10307  *
   10308  *  If the detected new result and previous assumption is different,
   10309  * diagnous message will be printed.
   10310  */
   10311 static void
   10312 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10313     uint16_t phy_model)
   10314 {
   10315 	device_t dev = sc->sc_dev;
   10316 	struct mii_data *mii = &sc->sc_mii;
   10317 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10318 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10319 	mii_readreg_t new_readreg;
   10320 	mii_writereg_t new_writereg;
   10321 	bool dodiag = true;
   10322 
   10323 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10324 		device_xname(sc->sc_dev), __func__));
   10325 
   10326 	/*
   10327 	 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always
   10328 	 * incorrect. So don't print diag output when it's 2nd call.
   10329 	 */
   10330 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10331 		dodiag = false;
   10332 
   10333 	if (mii->mii_readreg == NULL) {
   10334 		/*
   10335 		 *  This is the first call of this function. For ICH and PCH
   10336 		 * variants, it's difficult to determine the PHY access method
   10337 		 * by sc_type, so use the PCI product ID for some devices.
   10338 		 */
   10339 
   10340 		switch (sc->sc_pcidevid) {
   10341 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10342 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10343 			/* 82577 */
   10344 			new_phytype = WMPHY_82577;
   10345 			break;
   10346 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10347 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10348 			/* 82578 */
   10349 			new_phytype = WMPHY_82578;
   10350 			break;
   10351 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10352 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10353 			/* 82579 */
   10354 			new_phytype = WMPHY_82579;
   10355 			break;
   10356 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10357 		case PCI_PRODUCT_INTEL_82801I_BM:
   10358 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10359 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10360 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10361 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10362 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10363 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10364 			/* ICH8, 9, 10 with 82567 */
   10365 			new_phytype = WMPHY_BM;
   10366 			break;
   10367 		default:
   10368 			break;
   10369 		}
   10370 	} else {
   10371 		/* It's not the first call. Use PHY OUI and model */
   10372 		switch (phy_oui) {
   10373 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10374 			switch (phy_model) {
   10375 			case 0x0004: /* XXX */
   10376 				new_phytype = WMPHY_82578;
   10377 				break;
   10378 			default:
   10379 				break;
   10380 			}
   10381 			break;
   10382 		case MII_OUI_xxMARVELL:
   10383 			switch (phy_model) {
   10384 			case MII_MODEL_xxMARVELL_I210:
   10385 				new_phytype = WMPHY_I210;
   10386 				break;
   10387 			case MII_MODEL_xxMARVELL_E1011:
   10388 			case MII_MODEL_xxMARVELL_E1000_3:
   10389 			case MII_MODEL_xxMARVELL_E1000_5:
   10390 			case MII_MODEL_xxMARVELL_E1112:
   10391 				new_phytype = WMPHY_M88;
   10392 				break;
   10393 			case MII_MODEL_xxMARVELL_E1149:
   10394 				new_phytype = WMPHY_BM;
   10395 				break;
   10396 			case MII_MODEL_xxMARVELL_E1111:
   10397 			case MII_MODEL_xxMARVELL_I347:
   10398 			case MII_MODEL_xxMARVELL_E1512:
   10399 			case MII_MODEL_xxMARVELL_E1340M:
   10400 			case MII_MODEL_xxMARVELL_E1543:
   10401 				new_phytype = WMPHY_M88;
   10402 				break;
   10403 			case MII_MODEL_xxMARVELL_I82563:
   10404 				new_phytype = WMPHY_GG82563;
   10405 				break;
   10406 			default:
   10407 				break;
   10408 			}
   10409 			break;
   10410 		case MII_OUI_INTEL:
   10411 			switch (phy_model) {
   10412 			case MII_MODEL_INTEL_I82577:
   10413 				new_phytype = WMPHY_82577;
   10414 				break;
   10415 			case MII_MODEL_INTEL_I82579:
   10416 				new_phytype = WMPHY_82579;
   10417 				break;
   10418 			case MII_MODEL_INTEL_I217:
   10419 				new_phytype = WMPHY_I217;
   10420 				break;
   10421 			case MII_MODEL_INTEL_I82580:
   10422 				new_phytype = WMPHY_82580;
   10423 				break;
   10424 			case MII_MODEL_INTEL_I350:
   10425 				new_phytype = WMPHY_I350;
   10426 				break;
   10427 				break;
   10428 			default:
   10429 				break;
   10430 			}
   10431 			break;
   10432 		case MII_OUI_yyINTEL:
   10433 			switch (phy_model) {
   10434 			case MII_MODEL_yyINTEL_I82562G:
   10435 			case MII_MODEL_yyINTEL_I82562EM:
   10436 			case MII_MODEL_yyINTEL_I82562ET:
   10437 				new_phytype = WMPHY_IFE;
   10438 				break;
   10439 			case MII_MODEL_yyINTEL_IGP01E1000:
   10440 				new_phytype = WMPHY_IGP;
   10441 				break;
   10442 			case MII_MODEL_yyINTEL_I82566:
   10443 				new_phytype = WMPHY_IGP_3;
   10444 				break;
   10445 			default:
   10446 				break;
   10447 			}
   10448 			break;
   10449 		default:
   10450 			break;
   10451 		}
   10452 
   10453 		if (dodiag) {
   10454 			if (new_phytype == WMPHY_UNKNOWN)
   10455 				aprint_verbose_dev(dev,
   10456 				    "%s: Unknown PHY model. OUI=%06x, "
   10457 				    "model=%04x\n", __func__, phy_oui,
   10458 				    phy_model);
   10459 
   10460 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10461 			    && (sc->sc_phytype != new_phytype)) {
   10462 				aprint_error_dev(dev, "Previously assumed PHY "
   10463 				    "type(%u) was incorrect. PHY type from PHY"
   10464 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10465 			}
   10466 		}
   10467 	}
   10468 
   10469 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10470 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10471 		/* SGMII */
   10472 		new_readreg = wm_sgmii_readreg;
   10473 		new_writereg = wm_sgmii_writereg;
   10474 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10475 		/* BM2 (phyaddr == 1) */
   10476 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10477 		    && (new_phytype != WMPHY_BM)
   10478 		    && (new_phytype != WMPHY_UNKNOWN))
   10479 			doubt_phytype = new_phytype;
   10480 		new_phytype = WMPHY_BM;
   10481 		new_readreg = wm_gmii_bm_readreg;
   10482 		new_writereg = wm_gmii_bm_writereg;
   10483 	} else if (sc->sc_type >= WM_T_PCH) {
   10484 		/* All PCH* use _hv_ */
   10485 		new_readreg = wm_gmii_hv_readreg;
   10486 		new_writereg = wm_gmii_hv_writereg;
   10487 	} else if (sc->sc_type >= WM_T_ICH8) {
   10488 		/* non-82567 ICH8, 9 and 10 */
   10489 		new_readreg = wm_gmii_i82544_readreg;
   10490 		new_writereg = wm_gmii_i82544_writereg;
   10491 	} else if (sc->sc_type >= WM_T_80003) {
   10492 		/* 80003 */
   10493 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10494 		    && (new_phytype != WMPHY_GG82563)
   10495 		    && (new_phytype != WMPHY_UNKNOWN))
   10496 			doubt_phytype = new_phytype;
   10497 		new_phytype = WMPHY_GG82563;
   10498 		new_readreg = wm_gmii_i80003_readreg;
   10499 		new_writereg = wm_gmii_i80003_writereg;
   10500 	} else if (sc->sc_type >= WM_T_I210) {
   10501 		/* I210 and I211 */
   10502 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10503 		    && (new_phytype != WMPHY_I210)
   10504 		    && (new_phytype != WMPHY_UNKNOWN))
   10505 			doubt_phytype = new_phytype;
   10506 		new_phytype = WMPHY_I210;
   10507 		new_readreg = wm_gmii_gs40g_readreg;
   10508 		new_writereg = wm_gmii_gs40g_writereg;
   10509 	} else if (sc->sc_type >= WM_T_82580) {
   10510 		/* 82580, I350 and I354 */
   10511 		new_readreg = wm_gmii_82580_readreg;
   10512 		new_writereg = wm_gmii_82580_writereg;
   10513 	} else if (sc->sc_type >= WM_T_82544) {
   10514 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   10515 		new_readreg = wm_gmii_i82544_readreg;
   10516 		new_writereg = wm_gmii_i82544_writereg;
   10517 	} else {
   10518 		new_readreg = wm_gmii_i82543_readreg;
   10519 		new_writereg = wm_gmii_i82543_writereg;
   10520 	}
   10521 
   10522 	if (new_phytype == WMPHY_BM) {
   10523 		/* All BM use _bm_ */
   10524 		new_readreg = wm_gmii_bm_readreg;
   10525 		new_writereg = wm_gmii_bm_writereg;
   10526 	}
   10527 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10528 		/* All PCH* use _hv_ */
   10529 		new_readreg = wm_gmii_hv_readreg;
   10530 		new_writereg = wm_gmii_hv_writereg;
   10531 	}
   10532 
   10533 	/* Diag output */
   10534 	if (dodiag) {
   10535 		if (doubt_phytype != WMPHY_UNKNOWN)
   10536 			aprint_error_dev(dev, "Assumed new PHY type was "
   10537 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10538 			    new_phytype);
   10539 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10540 		    && (sc->sc_phytype != new_phytype))
   10541 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   10542 			    "was incorrect. New PHY type = %u\n",
   10543 			    sc->sc_phytype, new_phytype);
   10544 
   10545 		if ((mii->mii_readreg != NULL) &&
   10546 		    (new_phytype == WMPHY_UNKNOWN))
   10547 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10548 
   10549 		if ((mii->mii_readreg != NULL) &&
   10550 		    (mii->mii_readreg != new_readreg))
   10551 			aprint_error_dev(dev, "Previously assumed PHY "
   10552 			    "read/write function was incorrect.\n");
   10553 	}
   10554 
   10555 	/* Update now */
   10556 	sc->sc_phytype = new_phytype;
   10557 	mii->mii_readreg = new_readreg;
   10558 	mii->mii_writereg = new_writereg;
   10559 	if (new_readreg == wm_gmii_hv_readreg) {
   10560 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10561 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10562 	} else if (new_readreg == wm_sgmii_readreg) {
   10563 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10564 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10565 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10566 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10567 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10568 	}
   10569 }
   10570 
   10571 /*
   10572  * wm_get_phy_id_82575:
   10573  *
   10574  * Return PHY ID. Return -1 if it failed.
   10575  */
   10576 static int
   10577 wm_get_phy_id_82575(struct wm_softc *sc)
   10578 {
   10579 	uint32_t reg;
   10580 	int phyid = -1;
   10581 
   10582 	/* XXX */
   10583 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10584 		return -1;
   10585 
   10586 	if (wm_sgmii_uses_mdio(sc)) {
   10587 		switch (sc->sc_type) {
   10588 		case WM_T_82575:
   10589 		case WM_T_82576:
   10590 			reg = CSR_READ(sc, WMREG_MDIC);
   10591 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10592 			break;
   10593 		case WM_T_82580:
   10594 		case WM_T_I350:
   10595 		case WM_T_I354:
   10596 		case WM_T_I210:
   10597 		case WM_T_I211:
   10598 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10599 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10600 			break;
   10601 		default:
   10602 			return -1;
   10603 		}
   10604 	}
   10605 
   10606 	return phyid;
   10607 }
   10608 
   10609 /*
   10610  * wm_gmii_mediainit:
   10611  *
   10612  *	Initialize media for use on 1000BASE-T devices.
   10613  */
   10614 static void
   10615 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10616 {
   10617 	device_t dev = sc->sc_dev;
   10618 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10619 	struct mii_data *mii = &sc->sc_mii;
   10620 
   10621 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10622 		device_xname(sc->sc_dev), __func__));
   10623 
   10624 	/* We have GMII. */
   10625 	sc->sc_flags |= WM_F_HAS_MII;
   10626 
   10627 	if (sc->sc_type == WM_T_80003)
   10628 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10629 	else
   10630 		sc->sc_tipg = TIPG_1000T_DFLT;
   10631 
   10632 	/*
   10633 	 * Let the chip set speed/duplex on its own based on
   10634 	 * signals from the PHY.
   10635 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10636 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10637 	 */
   10638 	sc->sc_ctrl |= CTRL_SLU;
   10639 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10640 
   10641 	/* Initialize our media structures and probe the GMII. */
   10642 	mii->mii_ifp = ifp;
   10643 
   10644 	mii->mii_statchg = wm_gmii_statchg;
   10645 
   10646 	/* get PHY control from SMBus to PCIe */
   10647 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10648 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10649 	    || (sc->sc_type == WM_T_PCH_CNP))
   10650 		wm_init_phy_workarounds_pchlan(sc);
   10651 
   10652 	wm_gmii_reset(sc);
   10653 
   10654 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10655 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10656 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10657 
   10658 	/* Setup internal SGMII PHY for SFP */
   10659 	wm_sgmii_sfp_preconfig(sc);
   10660 
   10661 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10662 	    || (sc->sc_type == WM_T_82580)
   10663 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10664 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10665 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10666 			/* Attach only one port */
   10667 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10668 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10669 		} else {
   10670 			int i, id;
   10671 			uint32_t ctrl_ext;
   10672 
   10673 			id = wm_get_phy_id_82575(sc);
   10674 			if (id != -1) {
   10675 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10676 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10677 			}
   10678 			if ((id == -1)
   10679 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10680 				/* Power on sgmii phy if it is disabled */
   10681 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10682 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10683 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10684 				CSR_WRITE_FLUSH(sc);
   10685 				delay(300*1000); /* XXX too long */
   10686 
   10687 				/*
   10688 				 * From 1 to 8.
   10689 				 *
   10690 				 * I2C access fails with I2C register's ERROR
   10691 				 * bit set, so prevent error message while
   10692 				 * scanning.
   10693 				 */
   10694 				sc->phy.no_errprint = true;
   10695 				for (i = 1; i < 8; i++)
   10696 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10697 					    0xffffffff, i, MII_OFFSET_ANY,
   10698 					    MIIF_DOPAUSE);
   10699 				sc->phy.no_errprint = false;
   10700 
   10701 				/* Restore previous sfp cage power state */
   10702 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10703 			}
   10704 		}
   10705 	} else
   10706 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10707 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10708 
   10709 	/*
   10710 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10711 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10712 	 */
   10713 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10714 		|| (sc->sc_type == WM_T_PCH_SPT)
   10715 		|| (sc->sc_type == WM_T_PCH_CNP))
   10716 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10717 		wm_set_mdio_slow_mode_hv(sc);
   10718 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10719 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10720 	}
   10721 
   10722 	/*
   10723 	 * (For ICH8 variants)
   10724 	 * If PHY detection failed, use BM's r/w function and retry.
   10725 	 */
   10726 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10727 		/* if failed, retry with *_bm_* */
   10728 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10729 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10730 		    sc->sc_phytype);
   10731 		sc->sc_phytype = WMPHY_BM;
   10732 		mii->mii_readreg = wm_gmii_bm_readreg;
   10733 		mii->mii_writereg = wm_gmii_bm_writereg;
   10734 
   10735 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10736 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10737 	}
   10738 
   10739 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10740 		/* Any PHY wasn't find */
   10741 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10742 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10743 		sc->sc_phytype = WMPHY_NONE;
   10744 	} else {
   10745 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10746 
   10747 		/*
   10748 		 * PHY Found! Check PHY type again by the second call of
   10749 		 * wm_gmii_setup_phytype.
   10750 		 */
   10751 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10752 		    child->mii_mpd_model);
   10753 
   10754 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10755 	}
   10756 }
   10757 
   10758 /*
   10759  * wm_gmii_mediachange:	[ifmedia interface function]
   10760  *
   10761  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10762  */
   10763 static int
   10764 wm_gmii_mediachange(struct ifnet *ifp)
   10765 {
   10766 	struct wm_softc *sc = ifp->if_softc;
   10767 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10768 	uint32_t reg;
   10769 	int rc;
   10770 
   10771 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10772 		device_xname(sc->sc_dev), __func__));
   10773 	if ((ifp->if_flags & IFF_UP) == 0)
   10774 		return 0;
   10775 
   10776 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10777 	if ((sc->sc_type == WM_T_82580)
   10778 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10779 	    || (sc->sc_type == WM_T_I211)) {
   10780 		reg = CSR_READ(sc, WMREG_PHPM);
   10781 		reg &= ~PHPM_GO_LINK_D;
   10782 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10783 	}
   10784 
   10785 	/* Disable D0 LPLU. */
   10786 	wm_lplu_d0_disable(sc);
   10787 
   10788 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10789 	sc->sc_ctrl |= CTRL_SLU;
   10790 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10791 	    || (sc->sc_type > WM_T_82543)) {
   10792 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10793 	} else {
   10794 		sc->sc_ctrl &= ~CTRL_ASDE;
   10795 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10796 		if (ife->ifm_media & IFM_FDX)
   10797 			sc->sc_ctrl |= CTRL_FD;
   10798 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10799 		case IFM_10_T:
   10800 			sc->sc_ctrl |= CTRL_SPEED_10;
   10801 			break;
   10802 		case IFM_100_TX:
   10803 			sc->sc_ctrl |= CTRL_SPEED_100;
   10804 			break;
   10805 		case IFM_1000_T:
   10806 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10807 			break;
   10808 		case IFM_NONE:
   10809 			/* There is no specific setting for IFM_NONE */
   10810 			break;
   10811 		default:
   10812 			panic("wm_gmii_mediachange: bad media 0x%x",
   10813 			    ife->ifm_media);
   10814 		}
   10815 	}
   10816 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10817 	CSR_WRITE_FLUSH(sc);
   10818 
   10819 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10820 		wm_serdes_mediachange(ifp);
   10821 
   10822 	if (sc->sc_type <= WM_T_82543)
   10823 		wm_gmii_reset(sc);
   10824 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10825 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
   10826 		/* allow time for SFP cage time to power up phy */
   10827 		delay(300 * 1000);
   10828 		wm_gmii_reset(sc);
   10829 	}
   10830 
   10831 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10832 		return 0;
   10833 	return rc;
   10834 }
   10835 
   10836 /*
   10837  * wm_gmii_mediastatus:	[ifmedia interface function]
   10838  *
   10839  *	Get the current interface media status on a 1000BASE-T device.
   10840  */
   10841 static void
   10842 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10843 {
   10844 	struct wm_softc *sc = ifp->if_softc;
   10845 
   10846 	ether_mediastatus(ifp, ifmr);
   10847 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10848 	    | sc->sc_flowflags;
   10849 }
   10850 
   10851 #define	MDI_IO		CTRL_SWDPIN(2)
   10852 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10853 #define	MDI_CLK		CTRL_SWDPIN(3)
   10854 
   10855 static void
   10856 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10857 {
   10858 	uint32_t i, v;
   10859 
   10860 	v = CSR_READ(sc, WMREG_CTRL);
   10861 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10862 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10863 
   10864 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10865 		if (data & i)
   10866 			v |= MDI_IO;
   10867 		else
   10868 			v &= ~MDI_IO;
   10869 		CSR_WRITE(sc, WMREG_CTRL, v);
   10870 		CSR_WRITE_FLUSH(sc);
   10871 		delay(10);
   10872 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10873 		CSR_WRITE_FLUSH(sc);
   10874 		delay(10);
   10875 		CSR_WRITE(sc, WMREG_CTRL, v);
   10876 		CSR_WRITE_FLUSH(sc);
   10877 		delay(10);
   10878 	}
   10879 }
   10880 
   10881 static uint16_t
   10882 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10883 {
   10884 	uint32_t v, i;
   10885 	uint16_t data = 0;
   10886 
   10887 	v = CSR_READ(sc, WMREG_CTRL);
   10888 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10889 	v |= CTRL_SWDPIO(3);
   10890 
   10891 	CSR_WRITE(sc, WMREG_CTRL, v);
   10892 	CSR_WRITE_FLUSH(sc);
   10893 	delay(10);
   10894 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10895 	CSR_WRITE_FLUSH(sc);
   10896 	delay(10);
   10897 	CSR_WRITE(sc, WMREG_CTRL, v);
   10898 	CSR_WRITE_FLUSH(sc);
   10899 	delay(10);
   10900 
   10901 	for (i = 0; i < 16; i++) {
   10902 		data <<= 1;
   10903 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10904 		CSR_WRITE_FLUSH(sc);
   10905 		delay(10);
   10906 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10907 			data |= 1;
   10908 		CSR_WRITE(sc, WMREG_CTRL, v);
   10909 		CSR_WRITE_FLUSH(sc);
   10910 		delay(10);
   10911 	}
   10912 
   10913 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10914 	CSR_WRITE_FLUSH(sc);
   10915 	delay(10);
   10916 	CSR_WRITE(sc, WMREG_CTRL, v);
   10917 	CSR_WRITE_FLUSH(sc);
   10918 	delay(10);
   10919 
   10920 	return data;
   10921 }
   10922 
   10923 #undef MDI_IO
   10924 #undef MDI_DIR
   10925 #undef MDI_CLK
   10926 
   10927 /*
   10928  * wm_gmii_i82543_readreg:	[mii interface function]
   10929  *
   10930  *	Read a PHY register on the GMII (i82543 version).
   10931  */
   10932 static int
   10933 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10934 {
   10935 	struct wm_softc *sc = device_private(dev);
   10936 
   10937 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10938 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10939 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10940 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10941 
   10942 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10943 		device_xname(dev), phy, reg, *val));
   10944 
   10945 	return 0;
   10946 }
   10947 
   10948 /*
   10949  * wm_gmii_i82543_writereg:	[mii interface function]
   10950  *
   10951  *	Write a PHY register on the GMII (i82543 version).
   10952  */
   10953 static int
   10954 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10955 {
   10956 	struct wm_softc *sc = device_private(dev);
   10957 
   10958 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10959 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10960 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10961 	    (MII_COMMAND_START << 30), 32);
   10962 
   10963 	return 0;
   10964 }
   10965 
   10966 /*
   10967  * wm_gmii_mdic_readreg:	[mii interface function]
   10968  *
   10969  *	Read a PHY register on the GMII.
   10970  */
   10971 static int
   10972 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10973 {
   10974 	struct wm_softc *sc = device_private(dev);
   10975 	uint32_t mdic = 0;
   10976 	int i;
   10977 
   10978 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10979 	    && (reg > MII_ADDRMASK)) {
   10980 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10981 		    __func__, sc->sc_phytype, reg);
   10982 		reg &= MII_ADDRMASK;
   10983 	}
   10984 
   10985 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10986 	    MDIC_REGADD(reg));
   10987 
   10988 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10989 		delay(50);
   10990 		mdic = CSR_READ(sc, WMREG_MDIC);
   10991 		if (mdic & MDIC_READY)
   10992 			break;
   10993 	}
   10994 
   10995 	if ((mdic & MDIC_READY) == 0) {
   10996 		DPRINTF(sc, WM_DEBUG_GMII,
   10997 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10998 			device_xname(dev), phy, reg));
   10999 		return ETIMEDOUT;
   11000 	} else if (mdic & MDIC_E) {
   11001 		/* This is normal if no PHY is present. */
   11002 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   11003 			device_xname(sc->sc_dev), phy, reg));
   11004 		return -1;
   11005 	} else
   11006 		*val = MDIC_DATA(mdic);
   11007 
   11008 	/*
   11009 	 * Allow some time after each MDIC transaction to avoid
   11010 	 * reading duplicate data in the next MDIC transaction.
   11011 	 */
   11012 	if (sc->sc_type == WM_T_PCH2)
   11013 		delay(100);
   11014 
   11015 	return 0;
   11016 }
   11017 
   11018 /*
   11019  * wm_gmii_mdic_writereg:	[mii interface function]
   11020  *
   11021  *	Write a PHY register on the GMII.
   11022  */
   11023 static int
   11024 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11025 {
   11026 	struct wm_softc *sc = device_private(dev);
   11027 	uint32_t mdic = 0;
   11028 	int i;
   11029 
   11030 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11031 	    && (reg > MII_ADDRMASK)) {
   11032 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11033 		    __func__, sc->sc_phytype, reg);
   11034 		reg &= MII_ADDRMASK;
   11035 	}
   11036 
   11037 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11038 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11039 
   11040 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11041 		delay(50);
   11042 		mdic = CSR_READ(sc, WMREG_MDIC);
   11043 		if (mdic & MDIC_READY)
   11044 			break;
   11045 	}
   11046 
   11047 	if ((mdic & MDIC_READY) == 0) {
   11048 		DPRINTF(sc, WM_DEBUG_GMII,
   11049 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11050 			device_xname(dev), phy, reg));
   11051 		return ETIMEDOUT;
   11052 	} else if (mdic & MDIC_E) {
   11053 		DPRINTF(sc, WM_DEBUG_GMII,
   11054 		    ("%s: MDIC write error: phy %d reg %d\n",
   11055 			device_xname(dev), phy, reg));
   11056 		return -1;
   11057 	}
   11058 
   11059 	/*
   11060 	 * Allow some time after each MDIC transaction to avoid
   11061 	 * reading duplicate data in the next MDIC transaction.
   11062 	 */
   11063 	if (sc->sc_type == WM_T_PCH2)
   11064 		delay(100);
   11065 
   11066 	return 0;
   11067 }
   11068 
   11069 /*
   11070  * wm_gmii_i82544_readreg:	[mii interface function]
   11071  *
   11072  *	Read a PHY register on the GMII.
   11073  */
   11074 static int
   11075 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11076 {
   11077 	struct wm_softc *sc = device_private(dev);
   11078 	int rv;
   11079 
   11080 	if (sc->phy.acquire(sc)) {
   11081 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11082 		return -1;
   11083 	}
   11084 
   11085 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11086 
   11087 	sc->phy.release(sc);
   11088 
   11089 	return rv;
   11090 }
   11091 
   11092 static int
   11093 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11094 {
   11095 	struct wm_softc *sc = device_private(dev);
   11096 	int rv;
   11097 
   11098 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11099 		switch (sc->sc_phytype) {
   11100 		case WMPHY_IGP:
   11101 		case WMPHY_IGP_2:
   11102 		case WMPHY_IGP_3:
   11103 			rv = wm_gmii_mdic_writereg(dev, phy,
   11104 			    IGPHY_PAGE_SELECT, reg);
   11105 			if (rv != 0)
   11106 				return rv;
   11107 			break;
   11108 		default:
   11109 #ifdef WM_DEBUG
   11110 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11111 			    __func__, sc->sc_phytype, reg);
   11112 #endif
   11113 			break;
   11114 		}
   11115 	}
   11116 
   11117 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11118 }
   11119 
   11120 /*
   11121  * wm_gmii_i82544_writereg:	[mii interface function]
   11122  *
   11123  *	Write a PHY register on the GMII.
   11124  */
   11125 static int
   11126 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11127 {
   11128 	struct wm_softc *sc = device_private(dev);
   11129 	int rv;
   11130 
   11131 	if (sc->phy.acquire(sc)) {
   11132 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11133 		return -1;
   11134 	}
   11135 
   11136 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11137 	sc->phy.release(sc);
   11138 
   11139 	return rv;
   11140 }
   11141 
   11142 static int
   11143 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11144 {
   11145 	struct wm_softc *sc = device_private(dev);
   11146 	int rv;
   11147 
   11148 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11149 		switch (sc->sc_phytype) {
   11150 		case WMPHY_IGP:
   11151 		case WMPHY_IGP_2:
   11152 		case WMPHY_IGP_3:
   11153 			rv = wm_gmii_mdic_writereg(dev, phy,
   11154 			    IGPHY_PAGE_SELECT, reg);
   11155 			if (rv != 0)
   11156 				return rv;
   11157 			break;
   11158 		default:
   11159 #ifdef WM_DEBUG
   11160 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   11161 			    __func__, sc->sc_phytype, reg);
   11162 #endif
   11163 			break;
   11164 		}
   11165 	}
   11166 
   11167 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11168 }
   11169 
   11170 /*
   11171  * wm_gmii_i80003_readreg:	[mii interface function]
   11172  *
   11173  *	Read a PHY register on the kumeran
   11174  * This could be handled by the PHY layer if we didn't have to lock the
   11175  * resource ...
   11176  */
   11177 static int
   11178 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11179 {
   11180 	struct wm_softc *sc = device_private(dev);
   11181 	int page_select;
   11182 	uint16_t temp, temp2;
   11183 	int rv = 0;
   11184 
   11185 	if (phy != 1) /* Only one PHY on kumeran bus */
   11186 		return -1;
   11187 
   11188 	if (sc->phy.acquire(sc)) {
   11189 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11190 		return -1;
   11191 	}
   11192 
   11193 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11194 		page_select = GG82563_PHY_PAGE_SELECT;
   11195 	else {
   11196 		/*
   11197 		 * Use Alternative Page Select register to access registers
   11198 		 * 30 and 31.
   11199 		 */
   11200 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11201 	}
   11202 	temp = reg >> GG82563_PAGE_SHIFT;
   11203 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11204 		goto out;
   11205 
   11206 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11207 		/*
   11208 		 * Wait more 200us for a bug of the ready bit in the MDIC
   11209 		 * register.
   11210 		 */
   11211 		delay(200);
   11212 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11213 		if ((rv != 0) || (temp2 != temp)) {
   11214 			device_printf(dev, "%s failed\n", __func__);
   11215 			rv = -1;
   11216 			goto out;
   11217 		}
   11218 		delay(200);
   11219 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11220 		delay(200);
   11221 	} else
   11222 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11223 
   11224 out:
   11225 	sc->phy.release(sc);
   11226 	return rv;
   11227 }
   11228 
   11229 /*
   11230  * wm_gmii_i80003_writereg:	[mii interface function]
   11231  *
   11232  *	Write a PHY register on the kumeran.
   11233  * This could be handled by the PHY layer if we didn't have to lock the
   11234  * resource ...
   11235  */
   11236 static int
   11237 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11238 {
   11239 	struct wm_softc *sc = device_private(dev);
   11240 	int page_select, rv;
   11241 	uint16_t temp, temp2;
   11242 
   11243 	if (phy != 1) /* Only one PHY on kumeran bus */
   11244 		return -1;
   11245 
   11246 	if (sc->phy.acquire(sc)) {
   11247 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11248 		return -1;
   11249 	}
   11250 
   11251 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11252 		page_select = GG82563_PHY_PAGE_SELECT;
   11253 	else {
   11254 		/*
   11255 		 * Use Alternative Page Select register to access registers
   11256 		 * 30 and 31.
   11257 		 */
   11258 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11259 	}
   11260 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11261 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11262 		goto out;
   11263 
   11264 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11265 		/*
   11266 		 * Wait more 200us for a bug of the ready bit in the MDIC
   11267 		 * register.
   11268 		 */
   11269 		delay(200);
   11270 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11271 		if ((rv != 0) || (temp2 != temp)) {
   11272 			device_printf(dev, "%s failed\n", __func__);
   11273 			rv = -1;
   11274 			goto out;
   11275 		}
   11276 		delay(200);
   11277 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11278 		delay(200);
   11279 	} else
   11280 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11281 
   11282 out:
   11283 	sc->phy.release(sc);
   11284 	return rv;
   11285 }
   11286 
   11287 /*
   11288  * wm_gmii_bm_readreg:	[mii interface function]
   11289  *
   11290  *	Read a PHY register on the kumeran
   11291  * This could be handled by the PHY layer if we didn't have to lock the
   11292  * resource ...
   11293  */
   11294 static int
   11295 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11296 {
   11297 	struct wm_softc *sc = device_private(dev);
   11298 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11299 	int rv;
   11300 
   11301 	if (sc->phy.acquire(sc)) {
   11302 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11303 		return -1;
   11304 	}
   11305 
   11306 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11307 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11308 		    || (reg == 31)) ? 1 : phy;
   11309 	/* Page 800 works differently than the rest so it has its own func */
   11310 	if (page == BM_WUC_PAGE) {
   11311 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11312 		goto release;
   11313 	}
   11314 
   11315 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11316 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11317 		    && (sc->sc_type != WM_T_82583))
   11318 			rv = wm_gmii_mdic_writereg(dev, phy,
   11319 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11320 		else
   11321 			rv = wm_gmii_mdic_writereg(dev, phy,
   11322 			    BME1000_PHY_PAGE_SELECT, page);
   11323 		if (rv != 0)
   11324 			goto release;
   11325 	}
   11326 
   11327 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11328 
   11329 release:
   11330 	sc->phy.release(sc);
   11331 	return rv;
   11332 }
   11333 
   11334 /*
   11335  * wm_gmii_bm_writereg:	[mii interface function]
   11336  *
   11337  *	Write a PHY register on the kumeran.
   11338  * This could be handled by the PHY layer if we didn't have to lock the
   11339  * resource ...
   11340  */
   11341 static int
   11342 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11343 {
   11344 	struct wm_softc *sc = device_private(dev);
   11345 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11346 	int rv;
   11347 
   11348 	if (sc->phy.acquire(sc)) {
   11349 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11350 		return -1;
   11351 	}
   11352 
   11353 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11354 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11355 		    || (reg == 31)) ? 1 : phy;
   11356 	/* Page 800 works differently than the rest so it has its own func */
   11357 	if (page == BM_WUC_PAGE) {
   11358 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11359 		goto release;
   11360 	}
   11361 
   11362 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11363 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11364 		    && (sc->sc_type != WM_T_82583))
   11365 			rv = wm_gmii_mdic_writereg(dev, phy,
   11366 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11367 		else
   11368 			rv = wm_gmii_mdic_writereg(dev, phy,
   11369 			    BME1000_PHY_PAGE_SELECT, page);
   11370 		if (rv != 0)
   11371 			goto release;
   11372 	}
   11373 
   11374 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11375 
   11376 release:
   11377 	sc->phy.release(sc);
   11378 	return rv;
   11379 }
   11380 
   11381 /*
   11382  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11383  *  @dev: pointer to the HW structure
   11384  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11385  *
   11386  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11387  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11388  */
   11389 static int
   11390 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11391 {
   11392 #ifdef WM_DEBUG
   11393 	struct wm_softc *sc = device_private(dev);
   11394 #endif
   11395 	uint16_t temp;
   11396 	int rv;
   11397 
   11398 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11399 		device_xname(dev), __func__));
   11400 
   11401 	if (!phy_regp)
   11402 		return -1;
   11403 
   11404 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11405 
   11406 	/* Select Port Control Registers page */
   11407 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11408 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11409 	if (rv != 0)
   11410 		return rv;
   11411 
   11412 	/* Read WUCE and save it */
   11413 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11414 	if (rv != 0)
   11415 		return rv;
   11416 
   11417 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11418 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11419 	 */
   11420 	temp = *phy_regp;
   11421 	temp |= BM_WUC_ENABLE_BIT;
   11422 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11423 
   11424 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11425 		return rv;
   11426 
   11427 	/* Select Host Wakeup Registers page - caller now able to write
   11428 	 * registers on the Wakeup registers page
   11429 	 */
   11430 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11431 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11432 }
   11433 
   11434 /*
   11435  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11436  *  @dev: pointer to the HW structure
   11437  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11438  *
   11439  *  Restore BM_WUC_ENABLE_REG to its original value.
   11440  *
   11441  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11442  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11443  *  caller.
   11444  */
   11445 static int
   11446 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11447 {
   11448 #ifdef WM_DEBUG
   11449 	struct wm_softc *sc = device_private(dev);
   11450 #endif
   11451 
   11452 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11453 		device_xname(dev), __func__));
   11454 
   11455 	if (!phy_regp)
   11456 		return -1;
   11457 
   11458 	/* Select Port Control Registers page */
   11459 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11460 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11461 
   11462 	/* Restore 769.17 to its original value */
   11463 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11464 
   11465 	return 0;
   11466 }
   11467 
   11468 /*
   11469  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11470  *  @sc: pointer to the HW structure
   11471  *  @offset: register offset to be read or written
   11472  *  @val: pointer to the data to read or write
   11473  *  @rd: determines if operation is read or write
   11474  *  @page_set: BM_WUC_PAGE already set and access enabled
   11475  *
   11476  *  Read the PHY register at offset and store the retrieved information in
   11477  *  data, or write data to PHY register at offset.  Note the procedure to
   11478  *  access the PHY wakeup registers is different than reading the other PHY
   11479  *  registers. It works as such:
   11480  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11481  *  2) Set page to 800 for host (801 if we were manageability)
   11482  *  3) Write the address using the address opcode (0x11)
   11483  *  4) Read or write the data using the data opcode (0x12)
   11484  *  5) Restore 769.17.2 to its original value
   11485  *
   11486  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11487  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11488  *
   11489  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11490  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11491  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11492  */
   11493 static int
   11494 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
   11495 	bool page_set)
   11496 {
   11497 	struct wm_softc *sc = device_private(dev);
   11498 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11499 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11500 	uint16_t wuce;
   11501 	int rv = 0;
   11502 
   11503 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11504 		device_xname(dev), __func__));
   11505 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11506 	if ((sc->sc_type == WM_T_PCH)
   11507 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11508 		device_printf(dev,
   11509 		    "Attempting to access page %d while gig enabled.\n", page);
   11510 	}
   11511 
   11512 	if (!page_set) {
   11513 		/* Enable access to PHY wakeup registers */
   11514 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11515 		if (rv != 0) {
   11516 			device_printf(dev,
   11517 			    "%s: Could not enable PHY wakeup reg access\n",
   11518 			    __func__);
   11519 			return rv;
   11520 		}
   11521 	}
   11522 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11523 		device_xname(sc->sc_dev), __func__, page, regnum));
   11524 
   11525 	/*
   11526 	 * 2) Access PHY wakeup register.
   11527 	 * See wm_access_phy_wakeup_reg_bm.
   11528 	 */
   11529 
   11530 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11531 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11532 	if (rv != 0)
   11533 		return rv;
   11534 
   11535 	if (rd) {
   11536 		/* Read the Wakeup register page value using opcode 0x12 */
   11537 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11538 	} else {
   11539 		/* Write the Wakeup register page value using opcode 0x12 */
   11540 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11541 	}
   11542 	if (rv != 0)
   11543 		return rv;
   11544 
   11545 	if (!page_set)
   11546 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11547 
   11548 	return rv;
   11549 }
   11550 
   11551 /*
   11552  * wm_gmii_hv_readreg:	[mii interface function]
   11553  *
   11554  *	Read a PHY register on the kumeran
   11555  * This could be handled by the PHY layer if we didn't have to lock the
   11556  * resource ...
   11557  */
   11558 static int
   11559 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11560 {
   11561 	struct wm_softc *sc = device_private(dev);
   11562 	int rv;
   11563 
   11564 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11565 		device_xname(dev), __func__));
   11566 	if (sc->phy.acquire(sc)) {
   11567 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11568 		return -1;
   11569 	}
   11570 
   11571 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11572 	sc->phy.release(sc);
   11573 	return rv;
   11574 }
   11575 
   11576 static int
   11577 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11578 {
   11579 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11580 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11581 	int rv;
   11582 
   11583 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11584 
   11585 	/* Page 800 works differently than the rest so it has its own func */
   11586 	if (page == BM_WUC_PAGE)
   11587 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11588 
   11589 	/*
   11590 	 * Lower than page 768 works differently than the rest so it has its
   11591 	 * own func
   11592 	 */
   11593 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11594 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11595 		return -1;
   11596 	}
   11597 
   11598 	/*
   11599 	 * XXX I21[789] documents say that the SMBus Address register is at
   11600 	 * PHY address 01, Page 0 (not 768), Register 26.
   11601 	 */
   11602 	if (page == HV_INTC_FC_PAGE_START)
   11603 		page = 0;
   11604 
   11605 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11606 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11607 		    page << BME1000_PAGE_SHIFT);
   11608 		if (rv != 0)
   11609 			return rv;
   11610 	}
   11611 
   11612 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11613 }
   11614 
   11615 /*
   11616  * wm_gmii_hv_writereg:	[mii interface function]
   11617  *
   11618  *	Write a PHY register on the kumeran.
   11619  * This could be handled by the PHY layer if we didn't have to lock the
   11620  * resource ...
   11621  */
   11622 static int
   11623 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11624 {
   11625 	struct wm_softc *sc = device_private(dev);
   11626 	int rv;
   11627 
   11628 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11629 		device_xname(dev), __func__));
   11630 
   11631 	if (sc->phy.acquire(sc)) {
   11632 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11633 		return -1;
   11634 	}
   11635 
   11636 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11637 	sc->phy.release(sc);
   11638 
   11639 	return rv;
   11640 }
   11641 
   11642 static int
   11643 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11644 {
   11645 	struct wm_softc *sc = device_private(dev);
   11646 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11647 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11648 	int rv;
   11649 
   11650 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11651 
   11652 	/* Page 800 works differently than the rest so it has its own func */
   11653 	if (page == BM_WUC_PAGE)
   11654 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11655 		    false);
   11656 
   11657 	/*
   11658 	 * Lower than page 768 works differently than the rest so it has its
   11659 	 * own func
   11660 	 */
   11661 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11662 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11663 		return -1;
   11664 	}
   11665 
   11666 	{
   11667 		/*
   11668 		 * XXX I21[789] documents say that the SMBus Address register
   11669 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11670 		 */
   11671 		if (page == HV_INTC_FC_PAGE_START)
   11672 			page = 0;
   11673 
   11674 		/*
   11675 		 * XXX Workaround MDIO accesses being disabled after entering
   11676 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11677 		 * register is set)
   11678 		 */
   11679 		if (sc->sc_phytype == WMPHY_82578) {
   11680 			struct mii_softc *child;
   11681 
   11682 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11683 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11684 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11685 			    && ((val & (1 << 11)) != 0)) {
   11686 				device_printf(dev, "XXX need workaround\n");
   11687 			}
   11688 		}
   11689 
   11690 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11691 			rv = wm_gmii_mdic_writereg(dev, 1,
   11692 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11693 			if (rv != 0)
   11694 				return rv;
   11695 		}
   11696 	}
   11697 
   11698 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11699 }
   11700 
   11701 /*
   11702  * wm_gmii_82580_readreg:	[mii interface function]
   11703  *
   11704  *	Read a PHY register on the 82580 and I350.
   11705  * This could be handled by the PHY layer if we didn't have to lock the
   11706  * resource ...
   11707  */
   11708 static int
   11709 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11710 {
   11711 	struct wm_softc *sc = device_private(dev);
   11712 	int rv;
   11713 
   11714 	if (sc->phy.acquire(sc) != 0) {
   11715 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11716 		return -1;
   11717 	}
   11718 
   11719 #ifdef DIAGNOSTIC
   11720 	if (reg > MII_ADDRMASK) {
   11721 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11722 		    __func__, sc->sc_phytype, reg);
   11723 		reg &= MII_ADDRMASK;
   11724 	}
   11725 #endif
   11726 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11727 
   11728 	sc->phy.release(sc);
   11729 	return rv;
   11730 }
   11731 
   11732 /*
   11733  * wm_gmii_82580_writereg:	[mii interface function]
   11734  *
   11735  *	Write a PHY register on the 82580 and I350.
   11736  * This could be handled by the PHY layer if we didn't have to lock the
   11737  * resource ...
   11738  */
   11739 static int
   11740 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11741 {
   11742 	struct wm_softc *sc = device_private(dev);
   11743 	int rv;
   11744 
   11745 	if (sc->phy.acquire(sc) != 0) {
   11746 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11747 		return -1;
   11748 	}
   11749 
   11750 #ifdef DIAGNOSTIC
   11751 	if (reg > MII_ADDRMASK) {
   11752 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11753 		    __func__, sc->sc_phytype, reg);
   11754 		reg &= MII_ADDRMASK;
   11755 	}
   11756 #endif
   11757 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11758 
   11759 	sc->phy.release(sc);
   11760 	return rv;
   11761 }
   11762 
   11763 /*
   11764  * wm_gmii_gs40g_readreg:	[mii interface function]
   11765  *
   11766  *	Read a PHY register on the I2100 and I211.
   11767  * This could be handled by the PHY layer if we didn't have to lock the
   11768  * resource ...
   11769  */
   11770 static int
   11771 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11772 {
   11773 	struct wm_softc *sc = device_private(dev);
   11774 	int page, offset;
   11775 	int rv;
   11776 
   11777 	/* Acquire semaphore */
   11778 	if (sc->phy.acquire(sc)) {
   11779 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11780 		return -1;
   11781 	}
   11782 
   11783 	/* Page select */
   11784 	page = reg >> GS40G_PAGE_SHIFT;
   11785 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11786 	if (rv != 0)
   11787 		goto release;
   11788 
   11789 	/* Read reg */
   11790 	offset = reg & GS40G_OFFSET_MASK;
   11791 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11792 
   11793 release:
   11794 	sc->phy.release(sc);
   11795 	return rv;
   11796 }
   11797 
   11798 /*
   11799  * wm_gmii_gs40g_writereg:	[mii interface function]
   11800  *
   11801  *	Write a PHY register on the I210 and I211.
   11802  * This could be handled by the PHY layer if we didn't have to lock the
   11803  * resource ...
   11804  */
   11805 static int
   11806 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11807 {
   11808 	struct wm_softc *sc = device_private(dev);
   11809 	uint16_t page;
   11810 	int offset, rv;
   11811 
   11812 	/* Acquire semaphore */
   11813 	if (sc->phy.acquire(sc)) {
   11814 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11815 		return -1;
   11816 	}
   11817 
   11818 	/* Page select */
   11819 	page = reg >> GS40G_PAGE_SHIFT;
   11820 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11821 	if (rv != 0)
   11822 		goto release;
   11823 
   11824 	/* Write reg */
   11825 	offset = reg & GS40G_OFFSET_MASK;
   11826 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11827 
   11828 release:
   11829 	/* Release semaphore */
   11830 	sc->phy.release(sc);
   11831 	return rv;
   11832 }
   11833 
   11834 /*
   11835  * wm_gmii_statchg:	[mii interface function]
   11836  *
   11837  *	Callback from MII layer when media changes.
   11838  */
   11839 static void
   11840 wm_gmii_statchg(struct ifnet *ifp)
   11841 {
   11842 	struct wm_softc *sc = ifp->if_softc;
   11843 	struct mii_data *mii = &sc->sc_mii;
   11844 
   11845 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11846 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11847 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11848 
   11849 	/* Get flow control negotiation result. */
   11850 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11851 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11852 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11853 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11854 	}
   11855 
   11856 	if (sc->sc_flowflags & IFM_FLOW) {
   11857 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11858 			sc->sc_ctrl |= CTRL_TFCE;
   11859 			sc->sc_fcrtl |= FCRTL_XONE;
   11860 		}
   11861 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11862 			sc->sc_ctrl |= CTRL_RFCE;
   11863 	}
   11864 
   11865 	if (mii->mii_media_active & IFM_FDX) {
   11866 		DPRINTF(sc, WM_DEBUG_LINK,
   11867 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11868 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11869 	} else {
   11870 		DPRINTF(sc, WM_DEBUG_LINK,
   11871 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11872 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11873 	}
   11874 
   11875 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11876 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11877 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11878 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11879 	if (sc->sc_type == WM_T_80003) {
   11880 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11881 		case IFM_1000_T:
   11882 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11883 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11884 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11885 			break;
   11886 		default:
   11887 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11888 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11889 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   11890 			break;
   11891 		}
   11892 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11893 	}
   11894 }
   11895 
   11896 /* kumeran related (80003, ICH* and PCH*) */
   11897 
   11898 /*
   11899  * wm_kmrn_readreg:
   11900  *
   11901  *	Read a kumeran register
   11902  */
   11903 static int
   11904 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11905 {
   11906 	int rv;
   11907 
   11908 	if (sc->sc_type == WM_T_80003)
   11909 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11910 	else
   11911 		rv = sc->phy.acquire(sc);
   11912 	if (rv != 0) {
   11913 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11914 		    __func__);
   11915 		return rv;
   11916 	}
   11917 
   11918 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11919 
   11920 	if (sc->sc_type == WM_T_80003)
   11921 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11922 	else
   11923 		sc->phy.release(sc);
   11924 
   11925 	return rv;
   11926 }
   11927 
   11928 static int
   11929 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11930 {
   11931 
   11932 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11933 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11934 	    KUMCTRLSTA_REN);
   11935 	CSR_WRITE_FLUSH(sc);
   11936 	delay(2);
   11937 
   11938 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11939 
   11940 	return 0;
   11941 }
   11942 
   11943 /*
   11944  * wm_kmrn_writereg:
   11945  *
   11946  *	Write a kumeran register
   11947  */
   11948 static int
   11949 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11950 {
   11951 	int rv;
   11952 
   11953 	if (sc->sc_type == WM_T_80003)
   11954 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11955 	else
   11956 		rv = sc->phy.acquire(sc);
   11957 	if (rv != 0) {
   11958 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11959 		    __func__);
   11960 		return rv;
   11961 	}
   11962 
   11963 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11964 
   11965 	if (sc->sc_type == WM_T_80003)
   11966 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11967 	else
   11968 		sc->phy.release(sc);
   11969 
   11970 	return rv;
   11971 }
   11972 
   11973 static int
   11974 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11975 {
   11976 
   11977 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11978 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11979 
   11980 	return 0;
   11981 }
   11982 
   11983 /*
   11984  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11985  * This access method is different from IEEE MMD.
   11986  */
   11987 static int
   11988 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11989 {
   11990 	struct wm_softc *sc = device_private(dev);
   11991 	int rv;
   11992 
   11993 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11994 	if (rv != 0)
   11995 		return rv;
   11996 
   11997 	if (rd)
   11998 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11999 	else
   12000 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12001 	return rv;
   12002 }
   12003 
   12004 static int
   12005 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12006 {
   12007 
   12008 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12009 }
   12010 
   12011 static int
   12012 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12013 {
   12014 
   12015 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12016 }
   12017 
   12018 /* SGMII related */
   12019 
   12020 /*
   12021  * wm_sgmii_uses_mdio
   12022  *
   12023  * Check whether the transaction is to the internal PHY or the external
   12024  * MDIO interface. Return true if it's MDIO.
   12025  */
   12026 static bool
   12027 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12028 {
   12029 	uint32_t reg;
   12030 	bool ismdio = false;
   12031 
   12032 	switch (sc->sc_type) {
   12033 	case WM_T_82575:
   12034 	case WM_T_82576:
   12035 		reg = CSR_READ(sc, WMREG_MDIC);
   12036 		ismdio = ((reg & MDIC_DEST) != 0);
   12037 		break;
   12038 	case WM_T_82580:
   12039 	case WM_T_I350:
   12040 	case WM_T_I354:
   12041 	case WM_T_I210:
   12042 	case WM_T_I211:
   12043 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12044 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12045 		break;
   12046 	default:
   12047 		break;
   12048 	}
   12049 
   12050 	return ismdio;
   12051 }
   12052 
   12053 /* Setup internal SGMII PHY for SFP */
   12054 static void
   12055 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12056 {
   12057 	uint16_t id1, id2, phyreg;
   12058 	int i, rv;
   12059 
   12060 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12061 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12062 		return;
   12063 
   12064 	for (i = 0; i < MII_NPHY; i++) {
   12065 		sc->phy.no_errprint = true;
   12066 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12067 		if (rv != 0)
   12068 			continue;
   12069 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12070 		if (rv != 0)
   12071 			continue;
   12072 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12073 			continue;
   12074 		sc->phy.no_errprint = false;
   12075 
   12076 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12077 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12078 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12079 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12080 		break;
   12081 	}
   12082 
   12083 }
   12084 
   12085 /*
   12086  * wm_sgmii_readreg:	[mii interface function]
   12087  *
   12088  *	Read a PHY register on the SGMII
   12089  * This could be handled by the PHY layer if we didn't have to lock the
   12090  * resource ...
   12091  */
   12092 static int
   12093 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12094 {
   12095 	struct wm_softc *sc = device_private(dev);
   12096 	int rv;
   12097 
   12098 	if (sc->phy.acquire(sc)) {
   12099 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12100 		return -1;
   12101 	}
   12102 
   12103 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12104 
   12105 	sc->phy.release(sc);
   12106 	return rv;
   12107 }
   12108 
   12109 static int
   12110 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12111 {
   12112 	struct wm_softc *sc = device_private(dev);
   12113 	uint32_t i2ccmd;
   12114 	int i, rv = 0;
   12115 
   12116 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12117 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12118 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12119 
   12120 	/* Poll the ready bit */
   12121 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12122 		delay(50);
   12123 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12124 		if (i2ccmd & I2CCMD_READY)
   12125 			break;
   12126 	}
   12127 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12128 		device_printf(dev, "I2CCMD Read did not complete\n");
   12129 		rv = ETIMEDOUT;
   12130 	}
   12131 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12132 		if (!sc->phy.no_errprint)
   12133 			device_printf(dev, "I2CCMD Error bit set\n");
   12134 		rv = EIO;
   12135 	}
   12136 
   12137 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12138 
   12139 	return rv;
   12140 }
   12141 
   12142 /*
   12143  * wm_sgmii_writereg:	[mii interface function]
   12144  *
   12145  *	Write a PHY register on the SGMII.
   12146  * This could be handled by the PHY layer if we didn't have to lock the
   12147  * resource ...
   12148  */
   12149 static int
   12150 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12151 {
   12152 	struct wm_softc *sc = device_private(dev);
   12153 	int rv;
   12154 
   12155 	if (sc->phy.acquire(sc) != 0) {
   12156 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12157 		return -1;
   12158 	}
   12159 
   12160 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12161 
   12162 	sc->phy.release(sc);
   12163 
   12164 	return rv;
   12165 }
   12166 
   12167 static int
   12168 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12169 {
   12170 	struct wm_softc *sc = device_private(dev);
   12171 	uint32_t i2ccmd;
   12172 	uint16_t swapdata;
   12173 	int rv = 0;
   12174 	int i;
   12175 
   12176 	/* Swap the data bytes for the I2C interface */
   12177 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12178 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12179 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12180 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12181 
   12182 	/* Poll the ready bit */
   12183 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12184 		delay(50);
   12185 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12186 		if (i2ccmd & I2CCMD_READY)
   12187 			break;
   12188 	}
   12189 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12190 		device_printf(dev, "I2CCMD Write did not complete\n");
   12191 		rv = ETIMEDOUT;
   12192 	}
   12193 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12194 		device_printf(dev, "I2CCMD Error bit set\n");
   12195 		rv = EIO;
   12196 	}
   12197 
   12198 	return rv;
   12199 }
   12200 
   12201 /* TBI related */
   12202 
   12203 static bool
   12204 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12205 {
   12206 	bool sig;
   12207 
   12208 	sig = ctrl & CTRL_SWDPIN(1);
   12209 
   12210 	/*
   12211 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12212 	 * detect a signal, 1 if they don't.
   12213 	 */
   12214 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12215 		sig = !sig;
   12216 
   12217 	return sig;
   12218 }
   12219 
   12220 /*
   12221  * wm_tbi_mediainit:
   12222  *
   12223  *	Initialize media for use on 1000BASE-X devices.
   12224  */
   12225 static void
   12226 wm_tbi_mediainit(struct wm_softc *sc)
   12227 {
   12228 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12229 	const char *sep = "";
   12230 
   12231 	if (sc->sc_type < WM_T_82543)
   12232 		sc->sc_tipg = TIPG_WM_DFLT;
   12233 	else
   12234 		sc->sc_tipg = TIPG_LG_DFLT;
   12235 
   12236 	sc->sc_tbi_serdes_anegticks = 5;
   12237 
   12238 	/* Initialize our media structures */
   12239 	sc->sc_mii.mii_ifp = ifp;
   12240 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12241 
   12242 	ifp->if_baudrate = IF_Gbps(1);
   12243 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12244 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12245 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12246 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12247 		    sc->sc_core_lock);
   12248 	} else {
   12249 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12250 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12251 	}
   12252 
   12253 	/*
   12254 	 * SWD Pins:
   12255 	 *
   12256 	 *	0 = Link LED (output)
   12257 	 *	1 = Loss Of Signal (input)
   12258 	 */
   12259 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12260 
   12261 	/* XXX Perhaps this is only for TBI */
   12262 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12263 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12264 
   12265 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12266 		sc->sc_ctrl &= ~CTRL_LRST;
   12267 
   12268 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12269 
   12270 #define	ADD(ss, mm, dd)							\
   12271 do {									\
   12272 	aprint_normal("%s%s", sep, ss);					\
   12273 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12274 	sep = ", ";							\
   12275 } while (/*CONSTCOND*/0)
   12276 
   12277 	aprint_normal_dev(sc->sc_dev, "");
   12278 
   12279 	if (sc->sc_type == WM_T_I354) {
   12280 		uint32_t status;
   12281 
   12282 		status = CSR_READ(sc, WMREG_STATUS);
   12283 		if (((status & STATUS_2P5_SKU) != 0)
   12284 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12285 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12286 		} else
   12287 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12288 	} else if (sc->sc_type == WM_T_82545) {
   12289 		/* Only 82545 is LX (XXX except SFP) */
   12290 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12291 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12292 	} else if (sc->sc_sfptype != 0) {
   12293 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12294 		switch (sc->sc_sfptype) {
   12295 		default:
   12296 		case SFF_SFP_ETH_FLAGS_1000SX:
   12297 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12298 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12299 			break;
   12300 		case SFF_SFP_ETH_FLAGS_1000LX:
   12301 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12302 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12303 			break;
   12304 		case SFF_SFP_ETH_FLAGS_1000CX:
   12305 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12306 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12307 			break;
   12308 		case SFF_SFP_ETH_FLAGS_1000T:
   12309 			ADD("1000baseT", IFM_1000_T, 0);
   12310 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12311 			break;
   12312 		case SFF_SFP_ETH_FLAGS_100FX:
   12313 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12314 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12315 			break;
   12316 		}
   12317 	} else {
   12318 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12319 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12320 	}
   12321 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12322 	aprint_normal("\n");
   12323 
   12324 #undef ADD
   12325 
   12326 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12327 }
   12328 
   12329 /*
   12330  * wm_tbi_mediachange:	[ifmedia interface function]
   12331  *
   12332  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12333  */
   12334 static int
   12335 wm_tbi_mediachange(struct ifnet *ifp)
   12336 {
   12337 	struct wm_softc *sc = ifp->if_softc;
   12338 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12339 	uint32_t status, ctrl;
   12340 	bool signal;
   12341 	int i;
   12342 
   12343 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12344 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12345 		/* XXX need some work for >= 82571 and < 82575 */
   12346 		if (sc->sc_type < WM_T_82575)
   12347 			return 0;
   12348 	}
   12349 
   12350 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12351 	    || (sc->sc_type >= WM_T_82575))
   12352 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12353 
   12354 	sc->sc_ctrl &= ~CTRL_LRST;
   12355 	sc->sc_txcw = TXCW_ANE;
   12356 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12357 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12358 	else if (ife->ifm_media & IFM_FDX)
   12359 		sc->sc_txcw |= TXCW_FD;
   12360 	else
   12361 		sc->sc_txcw |= TXCW_HD;
   12362 
   12363 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12364 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12365 
   12366 	DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12367 		device_xname(sc->sc_dev), sc->sc_txcw));
   12368 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12369 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12370 	CSR_WRITE_FLUSH(sc);
   12371 	delay(1000);
   12372 
   12373 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12374 	signal = wm_tbi_havesignal(sc, ctrl);
   12375 
   12376 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12377 		signal));
   12378 
   12379 	if (signal) {
   12380 		/* Have signal; wait for the link to come up. */
   12381 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12382 			delay(10000);
   12383 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12384 				break;
   12385 		}
   12386 
   12387 		DPRINTF(sc, WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12388 			device_xname(sc->sc_dev), i));
   12389 
   12390 		status = CSR_READ(sc, WMREG_STATUS);
   12391 		DPRINTF(sc, WM_DEBUG_LINK,
   12392 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12393 			device_xname(sc->sc_dev), status, STATUS_LU));
   12394 		if (status & STATUS_LU) {
   12395 			/* Link is up. */
   12396 			DPRINTF(sc, WM_DEBUG_LINK,
   12397 			    ("%s: LINK: set media -> link up %s\n",
   12398 				device_xname(sc->sc_dev),
   12399 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12400 
   12401 			/*
   12402 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12403 			 * so we should update sc->sc_ctrl
   12404 			 */
   12405 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12406 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12407 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12408 			if (status & STATUS_FD)
   12409 				sc->sc_tctl |=
   12410 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12411 			else
   12412 				sc->sc_tctl |=
   12413 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12414 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12415 				sc->sc_fcrtl |= FCRTL_XONE;
   12416 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12417 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12418 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12419 			sc->sc_tbi_linkup = 1;
   12420 		} else {
   12421 			if (i == WM_LINKUP_TIMEOUT)
   12422 				wm_check_for_link(sc);
   12423 			/* Link is down. */
   12424 			DPRINTF(sc, WM_DEBUG_LINK,
   12425 			    ("%s: LINK: set media -> link down\n",
   12426 				device_xname(sc->sc_dev)));
   12427 			sc->sc_tbi_linkup = 0;
   12428 		}
   12429 	} else {
   12430 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12431 			device_xname(sc->sc_dev)));
   12432 		sc->sc_tbi_linkup = 0;
   12433 	}
   12434 
   12435 	wm_tbi_serdes_set_linkled(sc);
   12436 
   12437 	return 0;
   12438 }
   12439 
   12440 /*
   12441  * wm_tbi_mediastatus:	[ifmedia interface function]
   12442  *
   12443  *	Get the current interface media status on a 1000BASE-X device.
   12444  */
   12445 static void
   12446 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12447 {
   12448 	struct wm_softc *sc = ifp->if_softc;
   12449 	uint32_t ctrl, status;
   12450 
   12451 	ifmr->ifm_status = IFM_AVALID;
   12452 	ifmr->ifm_active = IFM_ETHER;
   12453 
   12454 	status = CSR_READ(sc, WMREG_STATUS);
   12455 	if ((status & STATUS_LU) == 0) {
   12456 		ifmr->ifm_active |= IFM_NONE;
   12457 		return;
   12458 	}
   12459 
   12460 	ifmr->ifm_status |= IFM_ACTIVE;
   12461 	/* Only 82545 is LX */
   12462 	if (sc->sc_type == WM_T_82545)
   12463 		ifmr->ifm_active |= IFM_1000_LX;
   12464 	else
   12465 		ifmr->ifm_active |= IFM_1000_SX;
   12466 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12467 		ifmr->ifm_active |= IFM_FDX;
   12468 	else
   12469 		ifmr->ifm_active |= IFM_HDX;
   12470 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12471 	if (ctrl & CTRL_RFCE)
   12472 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12473 	if (ctrl & CTRL_TFCE)
   12474 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12475 }
   12476 
   12477 /* XXX TBI only */
   12478 static int
   12479 wm_check_for_link(struct wm_softc *sc)
   12480 {
   12481 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12482 	uint32_t rxcw;
   12483 	uint32_t ctrl;
   12484 	uint32_t status;
   12485 	bool signal;
   12486 
   12487 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12488 		device_xname(sc->sc_dev), __func__));
   12489 
   12490 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12491 		/* XXX need some work for >= 82571 */
   12492 		if (sc->sc_type >= WM_T_82571) {
   12493 			sc->sc_tbi_linkup = 1;
   12494 			return 0;
   12495 		}
   12496 	}
   12497 
   12498 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12499 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12500 	status = CSR_READ(sc, WMREG_STATUS);
   12501 	signal = wm_tbi_havesignal(sc, ctrl);
   12502 
   12503 	DPRINTF(sc, WM_DEBUG_LINK,
   12504 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12505 		device_xname(sc->sc_dev), __func__, signal,
   12506 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12507 
   12508 	/*
   12509 	 * SWDPIN   LU RXCW
   12510 	 *	0    0	  0
   12511 	 *	0    0	  1	(should not happen)
   12512 	 *	0    1	  0	(should not happen)
   12513 	 *	0    1	  1	(should not happen)
   12514 	 *	1    0	  0	Disable autonego and force linkup
   12515 	 *	1    0	  1	got /C/ but not linkup yet
   12516 	 *	1    1	  0	(linkup)
   12517 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12518 	 *
   12519 	 */
   12520 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12521 		DPRINTF(sc, WM_DEBUG_LINK,
   12522 		    ("%s: %s: force linkup and fullduplex\n",
   12523 			device_xname(sc->sc_dev), __func__));
   12524 		sc->sc_tbi_linkup = 0;
   12525 		/* Disable auto-negotiation in the TXCW register */
   12526 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12527 
   12528 		/*
   12529 		 * Force link-up and also force full-duplex.
   12530 		 *
   12531 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
   12532 		 * so we should update sc->sc_ctrl
   12533 		 */
   12534 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12535 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12536 	} else if (((status & STATUS_LU) != 0)
   12537 	    && ((rxcw & RXCW_C) != 0)
   12538 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12539 		sc->sc_tbi_linkup = 1;
   12540 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12541 			device_xname(sc->sc_dev),
   12542 			__func__));
   12543 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12544 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12545 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12546 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
   12547 			device_xname(sc->sc_dev), __func__));
   12548 	} else {
   12549 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12550 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12551 			status));
   12552 	}
   12553 
   12554 	return 0;
   12555 }
   12556 
   12557 /*
   12558  * wm_tbi_tick:
   12559  *
   12560  *	Check the link on TBI devices.
   12561  *	This function acts as mii_tick().
   12562  */
   12563 static void
   12564 wm_tbi_tick(struct wm_softc *sc)
   12565 {
   12566 	struct mii_data *mii = &sc->sc_mii;
   12567 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12568 	uint32_t status;
   12569 
   12570 	KASSERT(WM_CORE_LOCKED(sc));
   12571 
   12572 	status = CSR_READ(sc, WMREG_STATUS);
   12573 
   12574 	/* XXX is this needed? */
   12575 	(void)CSR_READ(sc, WMREG_RXCW);
   12576 	(void)CSR_READ(sc, WMREG_CTRL);
   12577 
   12578 	/* set link status */
   12579 	if ((status & STATUS_LU) == 0) {
   12580 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12581 			device_xname(sc->sc_dev)));
   12582 		sc->sc_tbi_linkup = 0;
   12583 	} else if (sc->sc_tbi_linkup == 0) {
   12584 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12585 			device_xname(sc->sc_dev),
   12586 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12587 		sc->sc_tbi_linkup = 1;
   12588 		sc->sc_tbi_serdes_ticks = 0;
   12589 	}
   12590 
   12591 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12592 		goto setled;
   12593 
   12594 	if ((status & STATUS_LU) == 0) {
   12595 		sc->sc_tbi_linkup = 0;
   12596 		/* If the timer expired, retry autonegotiation */
   12597 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12598 		    && (++sc->sc_tbi_serdes_ticks
   12599 			>= sc->sc_tbi_serdes_anegticks)) {
   12600 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12601 				device_xname(sc->sc_dev), __func__));
   12602 			sc->sc_tbi_serdes_ticks = 0;
   12603 			/*
   12604 			 * Reset the link, and let autonegotiation do
   12605 			 * its thing
   12606 			 */
   12607 			sc->sc_ctrl |= CTRL_LRST;
   12608 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12609 			CSR_WRITE_FLUSH(sc);
   12610 			delay(1000);
   12611 			sc->sc_ctrl &= ~CTRL_LRST;
   12612 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12613 			CSR_WRITE_FLUSH(sc);
   12614 			delay(1000);
   12615 			CSR_WRITE(sc, WMREG_TXCW,
   12616 			    sc->sc_txcw & ~TXCW_ANE);
   12617 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12618 		}
   12619 	}
   12620 
   12621 setled:
   12622 	wm_tbi_serdes_set_linkled(sc);
   12623 }
   12624 
   12625 /* SERDES related */
   12626 static void
   12627 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12628 {
   12629 	uint32_t reg;
   12630 
   12631 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12632 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12633 		return;
   12634 
   12635 	/* Enable PCS to turn on link */
   12636 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12637 	reg |= PCS_CFG_PCS_EN;
   12638 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12639 
   12640 	/* Power up the laser */
   12641 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12642 	reg &= ~CTRL_EXT_SWDPIN(3);
   12643 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12644 
   12645 	/* Flush the write to verify completion */
   12646 	CSR_WRITE_FLUSH(sc);
   12647 	delay(1000);
   12648 }
   12649 
   12650 static int
   12651 wm_serdes_mediachange(struct ifnet *ifp)
   12652 {
   12653 	struct wm_softc *sc = ifp->if_softc;
   12654 	bool pcs_autoneg = true; /* XXX */
   12655 	uint32_t ctrl_ext, pcs_lctl, reg;
   12656 
   12657 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12658 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12659 		return 0;
   12660 
   12661 	/* XXX Currently, this function is not called on 8257[12] */
   12662 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12663 	    || (sc->sc_type >= WM_T_82575))
   12664 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12665 
   12666 	/* Power on the sfp cage if present */
   12667 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12668 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12669 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12670 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12671 
   12672 	sc->sc_ctrl |= CTRL_SLU;
   12673 
   12674 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12675 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12676 
   12677 		reg = CSR_READ(sc, WMREG_CONNSW);
   12678 		reg |= CONNSW_ENRGSRC;
   12679 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12680 	}
   12681 
   12682 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12683 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12684 	case CTRL_EXT_LINK_MODE_SGMII:
   12685 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12686 		pcs_autoneg = true;
   12687 		/* Autoneg time out should be disabled for SGMII mode */
   12688 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12689 		break;
   12690 	case CTRL_EXT_LINK_MODE_1000KX:
   12691 		pcs_autoneg = false;
   12692 		/* FALLTHROUGH */
   12693 	default:
   12694 		if ((sc->sc_type == WM_T_82575)
   12695 		    || (sc->sc_type == WM_T_82576)) {
   12696 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12697 				pcs_autoneg = false;
   12698 		}
   12699 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12700 		    | CTRL_FRCFDX;
   12701 
   12702 		/* Set speed of 1000/Full if speed/duplex is forced */
   12703 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12704 	}
   12705 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12706 
   12707 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12708 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12709 
   12710 	if (pcs_autoneg) {
   12711 		/* Set PCS register for autoneg */
   12712 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12713 
   12714 		/* Disable force flow control for autoneg */
   12715 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12716 
   12717 		/* Configure flow control advertisement for autoneg */
   12718 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12719 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12720 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12721 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12722 	} else
   12723 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12724 
   12725 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12726 
   12727 	return 0;
   12728 }
   12729 
   12730 static void
   12731 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12732 {
   12733 	struct wm_softc *sc = ifp->if_softc;
   12734 	struct mii_data *mii = &sc->sc_mii;
   12735 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12736 	uint32_t pcs_adv, pcs_lpab, reg;
   12737 
   12738 	ifmr->ifm_status = IFM_AVALID;
   12739 	ifmr->ifm_active = IFM_ETHER;
   12740 
   12741 	/* Check PCS */
   12742 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12743 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12744 		ifmr->ifm_active |= IFM_NONE;
   12745 		sc->sc_tbi_linkup = 0;
   12746 		goto setled;
   12747 	}
   12748 
   12749 	sc->sc_tbi_linkup = 1;
   12750 	ifmr->ifm_status |= IFM_ACTIVE;
   12751 	if (sc->sc_type == WM_T_I354) {
   12752 		uint32_t status;
   12753 
   12754 		status = CSR_READ(sc, WMREG_STATUS);
   12755 		if (((status & STATUS_2P5_SKU) != 0)
   12756 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12757 			ifmr->ifm_active |= IFM_2500_KX;
   12758 		} else
   12759 			ifmr->ifm_active |= IFM_1000_KX;
   12760 	} else {
   12761 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12762 		case PCS_LSTS_SPEED_10:
   12763 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12764 			break;
   12765 		case PCS_LSTS_SPEED_100:
   12766 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12767 			break;
   12768 		case PCS_LSTS_SPEED_1000:
   12769 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12770 			break;
   12771 		default:
   12772 			device_printf(sc->sc_dev, "Unknown speed\n");
   12773 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12774 			break;
   12775 		}
   12776 	}
   12777 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12778 	if ((reg & PCS_LSTS_FDX) != 0)
   12779 		ifmr->ifm_active |= IFM_FDX;
   12780 	else
   12781 		ifmr->ifm_active |= IFM_HDX;
   12782 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12783 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12784 		/* Check flow */
   12785 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12786 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12787 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12788 			goto setled;
   12789 		}
   12790 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12791 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12792 		DPRINTF(sc, WM_DEBUG_LINK,
   12793 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12794 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12795 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12796 			mii->mii_media_active |= IFM_FLOW
   12797 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12798 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12799 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12800 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12801 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12802 			mii->mii_media_active |= IFM_FLOW
   12803 			    | IFM_ETH_TXPAUSE;
   12804 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12805 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12806 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12807 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12808 			mii->mii_media_active |= IFM_FLOW
   12809 			    | IFM_ETH_RXPAUSE;
   12810 		}
   12811 	}
   12812 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12813 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12814 setled:
   12815 	wm_tbi_serdes_set_linkled(sc);
   12816 }
   12817 
   12818 /*
   12819  * wm_serdes_tick:
   12820  *
   12821  *	Check the link on serdes devices.
   12822  */
   12823 static void
   12824 wm_serdes_tick(struct wm_softc *sc)
   12825 {
   12826 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12827 	struct mii_data *mii = &sc->sc_mii;
   12828 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12829 	uint32_t reg;
   12830 
   12831 	KASSERT(WM_CORE_LOCKED(sc));
   12832 
   12833 	mii->mii_media_status = IFM_AVALID;
   12834 	mii->mii_media_active = IFM_ETHER;
   12835 
   12836 	/* Check PCS */
   12837 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12838 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12839 		mii->mii_media_status |= IFM_ACTIVE;
   12840 		sc->sc_tbi_linkup = 1;
   12841 		sc->sc_tbi_serdes_ticks = 0;
   12842 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12843 		if ((reg & PCS_LSTS_FDX) != 0)
   12844 			mii->mii_media_active |= IFM_FDX;
   12845 		else
   12846 			mii->mii_media_active |= IFM_HDX;
   12847 	} else {
   12848 		mii->mii_media_status |= IFM_NONE;
   12849 		sc->sc_tbi_linkup = 0;
   12850 		/* If the timer expired, retry autonegotiation */
   12851 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12852 		    && (++sc->sc_tbi_serdes_ticks
   12853 			>= sc->sc_tbi_serdes_anegticks)) {
   12854 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12855 				device_xname(sc->sc_dev), __func__));
   12856 			sc->sc_tbi_serdes_ticks = 0;
   12857 			/* XXX */
   12858 			wm_serdes_mediachange(ifp);
   12859 		}
   12860 	}
   12861 
   12862 	wm_tbi_serdes_set_linkled(sc);
   12863 }
   12864 
   12865 /* SFP related */
   12866 
   12867 static int
   12868 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12869 {
   12870 	uint32_t i2ccmd;
   12871 	int i;
   12872 
   12873 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12874 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12875 
   12876 	/* Poll the ready bit */
   12877 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12878 		delay(50);
   12879 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12880 		if (i2ccmd & I2CCMD_READY)
   12881 			break;
   12882 	}
   12883 	if ((i2ccmd & I2CCMD_READY) == 0)
   12884 		return -1;
   12885 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12886 		return -1;
   12887 
   12888 	*data = i2ccmd & 0x00ff;
   12889 
   12890 	return 0;
   12891 }
   12892 
   12893 static uint32_t
   12894 wm_sfp_get_media_type(struct wm_softc *sc)
   12895 {
   12896 	uint32_t ctrl_ext;
   12897 	uint8_t val = 0;
   12898 	int timeout = 3;
   12899 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12900 	int rv = -1;
   12901 
   12902 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12903 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12904 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12905 	CSR_WRITE_FLUSH(sc);
   12906 
   12907 	/* Read SFP module data */
   12908 	while (timeout) {
   12909 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12910 		if (rv == 0)
   12911 			break;
   12912 		delay(100*1000); /* XXX too big */
   12913 		timeout--;
   12914 	}
   12915 	if (rv != 0)
   12916 		goto out;
   12917 
   12918 	switch (val) {
   12919 	case SFF_SFP_ID_SFF:
   12920 		aprint_normal_dev(sc->sc_dev,
   12921 		    "Module/Connector soldered to board\n");
   12922 		break;
   12923 	case SFF_SFP_ID_SFP:
   12924 		sc->sc_flags |= WM_F_SFP;
   12925 		break;
   12926 	case SFF_SFP_ID_UNKNOWN:
   12927 		goto out;
   12928 	default:
   12929 		break;
   12930 	}
   12931 
   12932 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12933 	if (rv != 0)
   12934 		goto out;
   12935 
   12936 	sc->sc_sfptype = val;
   12937 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12938 		mediatype = WM_MEDIATYPE_SERDES;
   12939 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12940 		sc->sc_flags |= WM_F_SGMII;
   12941 		mediatype = WM_MEDIATYPE_COPPER;
   12942 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12943 		sc->sc_flags |= WM_F_SGMII;
   12944 		mediatype = WM_MEDIATYPE_SERDES;
   12945 	} else {
   12946 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12947 		    __func__, sc->sc_sfptype);
   12948 		sc->sc_sfptype = 0; /* XXX unknown */
   12949 	}
   12950 
   12951 out:
   12952 	/* Restore I2C interface setting */
   12953 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12954 
   12955 	return mediatype;
   12956 }
   12957 
   12958 /*
   12959  * NVM related.
   12960  * Microwire, SPI (w/wo EERD) and Flash.
   12961  */
   12962 
   12963 /* Both spi and uwire */
   12964 
   12965 /*
   12966  * wm_eeprom_sendbits:
   12967  *
   12968  *	Send a series of bits to the EEPROM.
   12969  */
   12970 static void
   12971 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12972 {
   12973 	uint32_t reg;
   12974 	int x;
   12975 
   12976 	reg = CSR_READ(sc, WMREG_EECD);
   12977 
   12978 	for (x = nbits; x > 0; x--) {
   12979 		if (bits & (1U << (x - 1)))
   12980 			reg |= EECD_DI;
   12981 		else
   12982 			reg &= ~EECD_DI;
   12983 		CSR_WRITE(sc, WMREG_EECD, reg);
   12984 		CSR_WRITE_FLUSH(sc);
   12985 		delay(2);
   12986 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12987 		CSR_WRITE_FLUSH(sc);
   12988 		delay(2);
   12989 		CSR_WRITE(sc, WMREG_EECD, reg);
   12990 		CSR_WRITE_FLUSH(sc);
   12991 		delay(2);
   12992 	}
   12993 }
   12994 
   12995 /*
   12996  * wm_eeprom_recvbits:
   12997  *
   12998  *	Receive a series of bits from the EEPROM.
   12999  */
   13000 static void
   13001 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13002 {
   13003 	uint32_t reg, val;
   13004 	int x;
   13005 
   13006 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13007 
   13008 	val = 0;
   13009 	for (x = nbits; x > 0; x--) {
   13010 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13011 		CSR_WRITE_FLUSH(sc);
   13012 		delay(2);
   13013 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13014 			val |= (1U << (x - 1));
   13015 		CSR_WRITE(sc, WMREG_EECD, reg);
   13016 		CSR_WRITE_FLUSH(sc);
   13017 		delay(2);
   13018 	}
   13019 	*valp = val;
   13020 }
   13021 
   13022 /* Microwire */
   13023 
   13024 /*
   13025  * wm_nvm_read_uwire:
   13026  *
   13027  *	Read a word from the EEPROM using the MicroWire protocol.
   13028  */
   13029 static int
   13030 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13031 {
   13032 	uint32_t reg, val;
   13033 	int i;
   13034 
   13035 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13036 		device_xname(sc->sc_dev), __func__));
   13037 
   13038 	if (sc->nvm.acquire(sc) != 0)
   13039 		return -1;
   13040 
   13041 	for (i = 0; i < wordcnt; i++) {
   13042 		/* Clear SK and DI. */
   13043 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13044 		CSR_WRITE(sc, WMREG_EECD, reg);
   13045 
   13046 		/*
   13047 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13048 		 * and Xen.
   13049 		 *
   13050 		 * We use this workaround only for 82540 because qemu's
   13051 		 * e1000 act as 82540.
   13052 		 */
   13053 		if (sc->sc_type == WM_T_82540) {
   13054 			reg |= EECD_SK;
   13055 			CSR_WRITE(sc, WMREG_EECD, reg);
   13056 			reg &= ~EECD_SK;
   13057 			CSR_WRITE(sc, WMREG_EECD, reg);
   13058 			CSR_WRITE_FLUSH(sc);
   13059 			delay(2);
   13060 		}
   13061 		/* XXX: end of workaround */
   13062 
   13063 		/* Set CHIP SELECT. */
   13064 		reg |= EECD_CS;
   13065 		CSR_WRITE(sc, WMREG_EECD, reg);
   13066 		CSR_WRITE_FLUSH(sc);
   13067 		delay(2);
   13068 
   13069 		/* Shift in the READ command. */
   13070 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13071 
   13072 		/* Shift in address. */
   13073 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13074 
   13075 		/* Shift out the data. */
   13076 		wm_eeprom_recvbits(sc, &val, 16);
   13077 		data[i] = val & 0xffff;
   13078 
   13079 		/* Clear CHIP SELECT. */
   13080 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13081 		CSR_WRITE(sc, WMREG_EECD, reg);
   13082 		CSR_WRITE_FLUSH(sc);
   13083 		delay(2);
   13084 	}
   13085 
   13086 	sc->nvm.release(sc);
   13087 	return 0;
   13088 }
   13089 
   13090 /* SPI */
   13091 
   13092 /*
   13093  * Set SPI and FLASH related information from the EECD register.
   13094  * For 82541 and 82547, the word size is taken from EEPROM.
   13095  */
   13096 static int
   13097 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13098 {
   13099 	int size;
   13100 	uint32_t reg;
   13101 	uint16_t data;
   13102 
   13103 	reg = CSR_READ(sc, WMREG_EECD);
   13104 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13105 
   13106 	/* Read the size of NVM from EECD by default */
   13107 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13108 	switch (sc->sc_type) {
   13109 	case WM_T_82541:
   13110 	case WM_T_82541_2:
   13111 	case WM_T_82547:
   13112 	case WM_T_82547_2:
   13113 		/* Set dummy value to access EEPROM */
   13114 		sc->sc_nvm_wordsize = 64;
   13115 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13116 			aprint_error_dev(sc->sc_dev,
   13117 			    "%s: failed to read EEPROM size\n", __func__);
   13118 		}
   13119 		reg = data;
   13120 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13121 		if (size == 0)
   13122 			size = 6; /* 64 word size */
   13123 		else
   13124 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13125 		break;
   13126 	case WM_T_80003:
   13127 	case WM_T_82571:
   13128 	case WM_T_82572:
   13129 	case WM_T_82573: /* SPI case */
   13130 	case WM_T_82574: /* SPI case */
   13131 	case WM_T_82583: /* SPI case */
   13132 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13133 		if (size > 14)
   13134 			size = 14;
   13135 		break;
   13136 	case WM_T_82575:
   13137 	case WM_T_82576:
   13138 	case WM_T_82580:
   13139 	case WM_T_I350:
   13140 	case WM_T_I354:
   13141 	case WM_T_I210:
   13142 	case WM_T_I211:
   13143 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13144 		if (size > 15)
   13145 			size = 15;
   13146 		break;
   13147 	default:
   13148 		aprint_error_dev(sc->sc_dev,
   13149 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13150 		return -1;
   13151 		break;
   13152 	}
   13153 
   13154 	sc->sc_nvm_wordsize = 1 << size;
   13155 
   13156 	return 0;
   13157 }
   13158 
   13159 /*
   13160  * wm_nvm_ready_spi:
   13161  *
   13162  *	Wait for a SPI EEPROM to be ready for commands.
   13163  */
   13164 static int
   13165 wm_nvm_ready_spi(struct wm_softc *sc)
   13166 {
   13167 	uint32_t val;
   13168 	int usec;
   13169 
   13170 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13171 		device_xname(sc->sc_dev), __func__));
   13172 
   13173 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13174 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13175 		wm_eeprom_recvbits(sc, &val, 8);
   13176 		if ((val & SPI_SR_RDY) == 0)
   13177 			break;
   13178 	}
   13179 	if (usec >= SPI_MAX_RETRIES) {
   13180 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   13181 		return -1;
   13182 	}
   13183 	return 0;
   13184 }
   13185 
   13186 /*
   13187  * wm_nvm_read_spi:
   13188  *
   13189  *	Read a work from the EEPROM using the SPI protocol.
   13190  */
   13191 static int
   13192 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13193 {
   13194 	uint32_t reg, val;
   13195 	int i;
   13196 	uint8_t opc;
   13197 	int rv = 0;
   13198 
   13199 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13200 		device_xname(sc->sc_dev), __func__));
   13201 
   13202 	if (sc->nvm.acquire(sc) != 0)
   13203 		return -1;
   13204 
   13205 	/* Clear SK and CS. */
   13206 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13207 	CSR_WRITE(sc, WMREG_EECD, reg);
   13208 	CSR_WRITE_FLUSH(sc);
   13209 	delay(2);
   13210 
   13211 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13212 		goto out;
   13213 
   13214 	/* Toggle CS to flush commands. */
   13215 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13216 	CSR_WRITE_FLUSH(sc);
   13217 	delay(2);
   13218 	CSR_WRITE(sc, WMREG_EECD, reg);
   13219 	CSR_WRITE_FLUSH(sc);
   13220 	delay(2);
   13221 
   13222 	opc = SPI_OPC_READ;
   13223 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13224 		opc |= SPI_OPC_A8;
   13225 
   13226 	wm_eeprom_sendbits(sc, opc, 8);
   13227 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13228 
   13229 	for (i = 0; i < wordcnt; i++) {
   13230 		wm_eeprom_recvbits(sc, &val, 16);
   13231 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13232 	}
   13233 
   13234 	/* Raise CS and clear SK. */
   13235 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13236 	CSR_WRITE(sc, WMREG_EECD, reg);
   13237 	CSR_WRITE_FLUSH(sc);
   13238 	delay(2);
   13239 
   13240 out:
   13241 	sc->nvm.release(sc);
   13242 	return rv;
   13243 }
   13244 
   13245 /* Using with EERD */
   13246 
   13247 static int
   13248 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13249 {
   13250 	uint32_t attempts = 100000;
   13251 	uint32_t i, reg = 0;
   13252 	int32_t done = -1;
   13253 
   13254 	for (i = 0; i < attempts; i++) {
   13255 		reg = CSR_READ(sc, rw);
   13256 
   13257 		if (reg & EERD_DONE) {
   13258 			done = 0;
   13259 			break;
   13260 		}
   13261 		delay(5);
   13262 	}
   13263 
   13264 	return done;
   13265 }
   13266 
   13267 static int
   13268 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13269 {
   13270 	int i, eerd = 0;
   13271 	int rv = 0;
   13272 
   13273 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13274 		device_xname(sc->sc_dev), __func__));
   13275 
   13276 	if (sc->nvm.acquire(sc) != 0)
   13277 		return -1;
   13278 
   13279 	for (i = 0; i < wordcnt; i++) {
   13280 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13281 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13282 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13283 		if (rv != 0) {
   13284 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13285 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13286 			break;
   13287 		}
   13288 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13289 	}
   13290 
   13291 	sc->nvm.release(sc);
   13292 	return rv;
   13293 }
   13294 
   13295 /* Flash */
   13296 
   13297 static int
   13298 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13299 {
   13300 	uint32_t eecd;
   13301 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13302 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13303 	uint32_t nvm_dword = 0;
   13304 	uint8_t sig_byte = 0;
   13305 	int rv;
   13306 
   13307 	switch (sc->sc_type) {
   13308 	case WM_T_PCH_SPT:
   13309 	case WM_T_PCH_CNP:
   13310 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13311 		act_offset = ICH_NVM_SIG_WORD * 2;
   13312 
   13313 		/* Set bank to 0 in case flash read fails. */
   13314 		*bank = 0;
   13315 
   13316 		/* Check bank 0 */
   13317 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13318 		if (rv != 0)
   13319 			return rv;
   13320 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13321 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13322 			*bank = 0;
   13323 			return 0;
   13324 		}
   13325 
   13326 		/* Check bank 1 */
   13327 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   13328 		    &nvm_dword);
   13329 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13330 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13331 			*bank = 1;
   13332 			return 0;
   13333 		}
   13334 		aprint_error_dev(sc->sc_dev,
   13335 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13336 		return -1;
   13337 	case WM_T_ICH8:
   13338 	case WM_T_ICH9:
   13339 		eecd = CSR_READ(sc, WMREG_EECD);
   13340 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13341 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13342 			return 0;
   13343 		}
   13344 		/* FALLTHROUGH */
   13345 	default:
   13346 		/* Default to 0 */
   13347 		*bank = 0;
   13348 
   13349 		/* Check bank 0 */
   13350 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13351 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13352 			*bank = 0;
   13353 			return 0;
   13354 		}
   13355 
   13356 		/* Check bank 1 */
   13357 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13358 		    &sig_byte);
   13359 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13360 			*bank = 1;
   13361 			return 0;
   13362 		}
   13363 	}
   13364 
   13365 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13366 		device_xname(sc->sc_dev)));
   13367 	return -1;
   13368 }
   13369 
   13370 /******************************************************************************
   13371  * This function does initial flash setup so that a new read/write/erase cycle
   13372  * can be started.
   13373  *
   13374  * sc - The pointer to the hw structure
   13375  ****************************************************************************/
   13376 static int32_t
   13377 wm_ich8_cycle_init(struct wm_softc *sc)
   13378 {
   13379 	uint16_t hsfsts;
   13380 	int32_t error = 1;
   13381 	int32_t i     = 0;
   13382 
   13383 	if (sc->sc_type >= WM_T_PCH_SPT)
   13384 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13385 	else
   13386 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13387 
   13388 	/* May be check the Flash Des Valid bit in Hw status */
   13389 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13390 		return error;
   13391 
   13392 	/* Clear FCERR in Hw status by writing 1 */
   13393 	/* Clear DAEL in Hw status by writing a 1 */
   13394 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13395 
   13396 	if (sc->sc_type >= WM_T_PCH_SPT)
   13397 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13398 	else
   13399 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13400 
   13401 	/*
   13402 	 * Either we should have a hardware SPI cycle in progress bit to check
   13403 	 * against, in order to start a new cycle or FDONE bit should be
   13404 	 * changed in the hardware so that it is 1 after hardware reset, which
   13405 	 * can then be used as an indication whether a cycle is in progress or
   13406 	 * has been completed .. we should also have some software semaphore
   13407 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   13408 	 * threads access to those bits can be sequentiallized or a way so that
   13409 	 * 2 threads don't start the cycle at the same time
   13410 	 */
   13411 
   13412 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13413 		/*
   13414 		 * There is no cycle running at present, so we can start a
   13415 		 * cycle
   13416 		 */
   13417 
   13418 		/* Begin by setting Flash Cycle Done. */
   13419 		hsfsts |= HSFSTS_DONE;
   13420 		if (sc->sc_type >= WM_T_PCH_SPT)
   13421 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13422 			    hsfsts & 0xffffUL);
   13423 		else
   13424 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13425 		error = 0;
   13426 	} else {
   13427 		/*
   13428 		 * Otherwise poll for sometime so the current cycle has a
   13429 		 * chance to end before giving up.
   13430 		 */
   13431 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13432 			if (sc->sc_type >= WM_T_PCH_SPT)
   13433 				hsfsts = ICH8_FLASH_READ32(sc,
   13434 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13435 			else
   13436 				hsfsts = ICH8_FLASH_READ16(sc,
   13437 				    ICH_FLASH_HSFSTS);
   13438 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13439 				error = 0;
   13440 				break;
   13441 			}
   13442 			delay(1);
   13443 		}
   13444 		if (error == 0) {
   13445 			/*
   13446 			 * Successful in waiting for previous cycle to timeout,
   13447 			 * now set the Flash Cycle Done.
   13448 			 */
   13449 			hsfsts |= HSFSTS_DONE;
   13450 			if (sc->sc_type >= WM_T_PCH_SPT)
   13451 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13452 				    hsfsts & 0xffffUL);
   13453 			else
   13454 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13455 				    hsfsts);
   13456 		}
   13457 	}
   13458 	return error;
   13459 }
   13460 
   13461 /******************************************************************************
   13462  * This function starts a flash cycle and waits for its completion
   13463  *
   13464  * sc - The pointer to the hw structure
   13465  ****************************************************************************/
   13466 static int32_t
   13467 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13468 {
   13469 	uint16_t hsflctl;
   13470 	uint16_t hsfsts;
   13471 	int32_t error = 1;
   13472 	uint32_t i = 0;
   13473 
   13474 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13475 	if (sc->sc_type >= WM_T_PCH_SPT)
   13476 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13477 	else
   13478 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13479 	hsflctl |= HSFCTL_GO;
   13480 	if (sc->sc_type >= WM_T_PCH_SPT)
   13481 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13482 		    (uint32_t)hsflctl << 16);
   13483 	else
   13484 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13485 
   13486 	/* Wait till FDONE bit is set to 1 */
   13487 	do {
   13488 		if (sc->sc_type >= WM_T_PCH_SPT)
   13489 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13490 			    & 0xffffUL;
   13491 		else
   13492 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13493 		if (hsfsts & HSFSTS_DONE)
   13494 			break;
   13495 		delay(1);
   13496 		i++;
   13497 	} while (i < timeout);
   13498 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   13499 		error = 0;
   13500 
   13501 	return error;
   13502 }
   13503 
   13504 /******************************************************************************
   13505  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13506  *
   13507  * sc - The pointer to the hw structure
   13508  * index - The index of the byte or word to read.
   13509  * size - Size of data to read, 1=byte 2=word, 4=dword
   13510  * data - Pointer to the word to store the value read.
   13511  *****************************************************************************/
   13512 static int32_t
   13513 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13514     uint32_t size, uint32_t *data)
   13515 {
   13516 	uint16_t hsfsts;
   13517 	uint16_t hsflctl;
   13518 	uint32_t flash_linear_address;
   13519 	uint32_t flash_data = 0;
   13520 	int32_t error = 1;
   13521 	int32_t count = 0;
   13522 
   13523 	if (size < 1  || size > 4 || data == 0x0 ||
   13524 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13525 		return error;
   13526 
   13527 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13528 	    sc->sc_ich8_flash_base;
   13529 
   13530 	do {
   13531 		delay(1);
   13532 		/* Steps */
   13533 		error = wm_ich8_cycle_init(sc);
   13534 		if (error)
   13535 			break;
   13536 
   13537 		if (sc->sc_type >= WM_T_PCH_SPT)
   13538 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13539 			    >> 16;
   13540 		else
   13541 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13542 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   13543 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13544 		    & HSFCTL_BCOUNT_MASK;
   13545 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13546 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13547 			/*
   13548 			 * In SPT, This register is in Lan memory space, not
   13549 			 * flash. Therefore, only 32 bit access is supported.
   13550 			 */
   13551 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13552 			    (uint32_t)hsflctl << 16);
   13553 		} else
   13554 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13555 
   13556 		/*
   13557 		 * Write the last 24 bits of index into Flash Linear address
   13558 		 * field in Flash Address
   13559 		 */
   13560 		/* TODO: TBD maybe check the index against the size of flash */
   13561 
   13562 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13563 
   13564 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13565 
   13566 		/*
   13567 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   13568 		 * the whole sequence a few more times, else read in (shift in)
   13569 		 * the Flash Data0, the order is least significant byte first
   13570 		 * msb to lsb
   13571 		 */
   13572 		if (error == 0) {
   13573 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13574 			if (size == 1)
   13575 				*data = (uint8_t)(flash_data & 0x000000FF);
   13576 			else if (size == 2)
   13577 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13578 			else if (size == 4)
   13579 				*data = (uint32_t)flash_data;
   13580 			break;
   13581 		} else {
   13582 			/*
   13583 			 * If we've gotten here, then things are probably
   13584 			 * completely hosed, but if the error condition is
   13585 			 * detected, it won't hurt to give it another try...
   13586 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13587 			 */
   13588 			if (sc->sc_type >= WM_T_PCH_SPT)
   13589 				hsfsts = ICH8_FLASH_READ32(sc,
   13590 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13591 			else
   13592 				hsfsts = ICH8_FLASH_READ16(sc,
   13593 				    ICH_FLASH_HSFSTS);
   13594 
   13595 			if (hsfsts & HSFSTS_ERR) {
   13596 				/* Repeat for some time before giving up. */
   13597 				continue;
   13598 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13599 				break;
   13600 		}
   13601 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13602 
   13603 	return error;
   13604 }
   13605 
   13606 /******************************************************************************
   13607  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13608  *
   13609  * sc - pointer to wm_hw structure
   13610  * index - The index of the byte to read.
   13611  * data - Pointer to a byte to store the value read.
   13612  *****************************************************************************/
   13613 static int32_t
   13614 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13615 {
   13616 	int32_t status;
   13617 	uint32_t word = 0;
   13618 
   13619 	status = wm_read_ich8_data(sc, index, 1, &word);
   13620 	if (status == 0)
   13621 		*data = (uint8_t)word;
   13622 	else
   13623 		*data = 0;
   13624 
   13625 	return status;
   13626 }
   13627 
   13628 /******************************************************************************
   13629  * Reads a word from the NVM using the ICH8 flash access registers.
   13630  *
   13631  * sc - pointer to wm_hw structure
   13632  * index - The starting byte index of the word to read.
   13633  * data - Pointer to a word to store the value read.
   13634  *****************************************************************************/
   13635 static int32_t
   13636 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13637 {
   13638 	int32_t status;
   13639 	uint32_t word = 0;
   13640 
   13641 	status = wm_read_ich8_data(sc, index, 2, &word);
   13642 	if (status == 0)
   13643 		*data = (uint16_t)word;
   13644 	else
   13645 		*data = 0;
   13646 
   13647 	return status;
   13648 }
   13649 
   13650 /******************************************************************************
   13651  * Reads a dword from the NVM using the ICH8 flash access registers.
   13652  *
   13653  * sc - pointer to wm_hw structure
   13654  * index - The starting byte index of the word to read.
   13655  * data - Pointer to a word to store the value read.
   13656  *****************************************************************************/
   13657 static int32_t
   13658 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13659 {
   13660 	int32_t status;
   13661 
   13662 	status = wm_read_ich8_data(sc, index, 4, data);
   13663 	return status;
   13664 }
   13665 
   13666 /******************************************************************************
   13667  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13668  * register.
   13669  *
   13670  * sc - Struct containing variables accessed by shared code
   13671  * offset - offset of word in the EEPROM to read
   13672  * data - word read from the EEPROM
   13673  * words - number of words to read
   13674  *****************************************************************************/
   13675 static int
   13676 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13677 {
   13678 	int32_t	 rv = 0;
   13679 	uint32_t flash_bank = 0;
   13680 	uint32_t act_offset = 0;
   13681 	uint32_t bank_offset = 0;
   13682 	uint16_t word = 0;
   13683 	uint16_t i = 0;
   13684 
   13685 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13686 		device_xname(sc->sc_dev), __func__));
   13687 
   13688 	if (sc->nvm.acquire(sc) != 0)
   13689 		return -1;
   13690 
   13691 	/*
   13692 	 * We need to know which is the valid flash bank.  In the event
   13693 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13694 	 * managing flash_bank. So it cannot be trusted and needs
   13695 	 * to be updated with each read.
   13696 	 */
   13697 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13698 	if (rv) {
   13699 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13700 			device_xname(sc->sc_dev)));
   13701 		flash_bank = 0;
   13702 	}
   13703 
   13704 	/*
   13705 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13706 	 * size
   13707 	 */
   13708 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13709 
   13710 	for (i = 0; i < words; i++) {
   13711 		/* The NVM part needs a byte offset, hence * 2 */
   13712 		act_offset = bank_offset + ((offset + i) * 2);
   13713 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13714 		if (rv) {
   13715 			aprint_error_dev(sc->sc_dev,
   13716 			    "%s: failed to read NVM\n", __func__);
   13717 			break;
   13718 		}
   13719 		data[i] = word;
   13720 	}
   13721 
   13722 	sc->nvm.release(sc);
   13723 	return rv;
   13724 }
   13725 
   13726 /******************************************************************************
   13727  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13728  * register.
   13729  *
   13730  * sc - Struct containing variables accessed by shared code
   13731  * offset - offset of word in the EEPROM to read
   13732  * data - word read from the EEPROM
   13733  * words - number of words to read
   13734  *****************************************************************************/
   13735 static int
   13736 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13737 {
   13738 	int32_t	 rv = 0;
   13739 	uint32_t flash_bank = 0;
   13740 	uint32_t act_offset = 0;
   13741 	uint32_t bank_offset = 0;
   13742 	uint32_t dword = 0;
   13743 	uint16_t i = 0;
   13744 
   13745 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13746 		device_xname(sc->sc_dev), __func__));
   13747 
   13748 	if (sc->nvm.acquire(sc) != 0)
   13749 		return -1;
   13750 
   13751 	/*
   13752 	 * We need to know which is the valid flash bank.  In the event
   13753 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13754 	 * managing flash_bank. So it cannot be trusted and needs
   13755 	 * to be updated with each read.
   13756 	 */
   13757 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13758 	if (rv) {
   13759 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13760 			device_xname(sc->sc_dev)));
   13761 		flash_bank = 0;
   13762 	}
   13763 
   13764 	/*
   13765 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13766 	 * size
   13767 	 */
   13768 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13769 
   13770 	for (i = 0; i < words; i++) {
   13771 		/* The NVM part needs a byte offset, hence * 2 */
   13772 		act_offset = bank_offset + ((offset + i) * 2);
   13773 		/* but we must read dword aligned, so mask ... */
   13774 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13775 		if (rv) {
   13776 			aprint_error_dev(sc->sc_dev,
   13777 			    "%s: failed to read NVM\n", __func__);
   13778 			break;
   13779 		}
   13780 		/* ... and pick out low or high word */
   13781 		if ((act_offset & 0x2) == 0)
   13782 			data[i] = (uint16_t)(dword & 0xFFFF);
   13783 		else
   13784 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13785 	}
   13786 
   13787 	sc->nvm.release(sc);
   13788 	return rv;
   13789 }
   13790 
   13791 /* iNVM */
   13792 
   13793 static int
   13794 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13795 {
   13796 	int32_t	 rv = 0;
   13797 	uint32_t invm_dword;
   13798 	uint16_t i;
   13799 	uint8_t record_type, word_address;
   13800 
   13801 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13802 		device_xname(sc->sc_dev), __func__));
   13803 
   13804 	for (i = 0; i < INVM_SIZE; i++) {
   13805 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13806 		/* Get record type */
   13807 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13808 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13809 			break;
   13810 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13811 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13812 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13813 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13814 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13815 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13816 			if (word_address == address) {
   13817 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13818 				rv = 0;
   13819 				break;
   13820 			}
   13821 		}
   13822 	}
   13823 
   13824 	return rv;
   13825 }
   13826 
   13827 static int
   13828 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13829 {
   13830 	int rv = 0;
   13831 	int i;
   13832 
   13833 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13834 		device_xname(sc->sc_dev), __func__));
   13835 
   13836 	if (sc->nvm.acquire(sc) != 0)
   13837 		return -1;
   13838 
   13839 	for (i = 0; i < words; i++) {
   13840 		switch (offset + i) {
   13841 		case NVM_OFF_MACADDR:
   13842 		case NVM_OFF_MACADDR1:
   13843 		case NVM_OFF_MACADDR2:
   13844 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13845 			if (rv != 0) {
   13846 				data[i] = 0xffff;
   13847 				rv = -1;
   13848 			}
   13849 			break;
   13850 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13851 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13852 			if (rv != 0) {
   13853 				*data = INVM_DEFAULT_AL;
   13854 				rv = 0;
   13855 			}
   13856 			break;
   13857 		case NVM_OFF_CFG2:
   13858 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13859 			if (rv != 0) {
   13860 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13861 				rv = 0;
   13862 			}
   13863 			break;
   13864 		case NVM_OFF_CFG4:
   13865 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13866 			if (rv != 0) {
   13867 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13868 				rv = 0;
   13869 			}
   13870 			break;
   13871 		case NVM_OFF_LED_1_CFG:
   13872 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13873 			if (rv != 0) {
   13874 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13875 				rv = 0;
   13876 			}
   13877 			break;
   13878 		case NVM_OFF_LED_0_2_CFG:
   13879 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13880 			if (rv != 0) {
   13881 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13882 				rv = 0;
   13883 			}
   13884 			break;
   13885 		case NVM_OFF_ID_LED_SETTINGS:
   13886 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13887 			if (rv != 0) {
   13888 				*data = ID_LED_RESERVED_FFFF;
   13889 				rv = 0;
   13890 			}
   13891 			break;
   13892 		default:
   13893 			DPRINTF(sc, WM_DEBUG_NVM,
   13894 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13895 			*data = NVM_RESERVED_WORD;
   13896 			break;
   13897 		}
   13898 	}
   13899 
   13900 	sc->nvm.release(sc);
   13901 	return rv;
   13902 }
   13903 
   13904 /* Lock, detecting NVM type, validate checksum, version and read */
   13905 
   13906 static int
   13907 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13908 {
   13909 	uint32_t eecd = 0;
   13910 
   13911 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13912 	    || sc->sc_type == WM_T_82583) {
   13913 		eecd = CSR_READ(sc, WMREG_EECD);
   13914 
   13915 		/* Isolate bits 15 & 16 */
   13916 		eecd = ((eecd >> 15) & 0x03);
   13917 
   13918 		/* If both bits are set, device is Flash type */
   13919 		if (eecd == 0x03)
   13920 			return 0;
   13921 	}
   13922 	return 1;
   13923 }
   13924 
   13925 static int
   13926 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13927 {
   13928 	uint32_t eec;
   13929 
   13930 	eec = CSR_READ(sc, WMREG_EEC);
   13931 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13932 		return 1;
   13933 
   13934 	return 0;
   13935 }
   13936 
   13937 /*
   13938  * wm_nvm_validate_checksum
   13939  *
   13940  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13941  */
   13942 static int
   13943 wm_nvm_validate_checksum(struct wm_softc *sc)
   13944 {
   13945 	uint16_t checksum;
   13946 	uint16_t eeprom_data;
   13947 #ifdef WM_DEBUG
   13948 	uint16_t csum_wordaddr, valid_checksum;
   13949 #endif
   13950 	int i;
   13951 
   13952 	checksum = 0;
   13953 
   13954 	/* Don't check for I211 */
   13955 	if (sc->sc_type == WM_T_I211)
   13956 		return 0;
   13957 
   13958 #ifdef WM_DEBUG
   13959 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13960 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13961 		csum_wordaddr = NVM_OFF_COMPAT;
   13962 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13963 	} else {
   13964 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13965 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13966 	}
   13967 
   13968 	/* Dump EEPROM image for debug */
   13969 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13970 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13971 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13972 		/* XXX PCH_SPT? */
   13973 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13974 		if ((eeprom_data & valid_checksum) == 0)
   13975 			DPRINTF(sc, WM_DEBUG_NVM,
   13976 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13977 				device_xname(sc->sc_dev), eeprom_data,
   13978 				    valid_checksum));
   13979 	}
   13980 
   13981 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   13982 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13983 		for (i = 0; i < NVM_SIZE; i++) {
   13984 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13985 				printf("XXXX ");
   13986 			else
   13987 				printf("%04hx ", eeprom_data);
   13988 			if (i % 8 == 7)
   13989 				printf("\n");
   13990 		}
   13991 	}
   13992 
   13993 #endif /* WM_DEBUG */
   13994 
   13995 	for (i = 0; i < NVM_SIZE; i++) {
   13996 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13997 			return 1;
   13998 		checksum += eeprom_data;
   13999 	}
   14000 
   14001 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14002 #ifdef WM_DEBUG
   14003 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14004 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14005 #endif
   14006 	}
   14007 
   14008 	return 0;
   14009 }
   14010 
   14011 static void
   14012 wm_nvm_version_invm(struct wm_softc *sc)
   14013 {
   14014 	uint32_t dword;
   14015 
   14016 	/*
   14017 	 * Linux's code to decode version is very strange, so we don't
   14018 	 * obey that algorithm and just use word 61 as the document.
   14019 	 * Perhaps it's not perfect though...
   14020 	 *
   14021 	 * Example:
   14022 	 *
   14023 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14024 	 */
   14025 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14026 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14027 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14028 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14029 }
   14030 
   14031 static void
   14032 wm_nvm_version(struct wm_softc *sc)
   14033 {
   14034 	uint16_t major, minor, build, patch;
   14035 	uint16_t uid0, uid1;
   14036 	uint16_t nvm_data;
   14037 	uint16_t off;
   14038 	bool check_version = false;
   14039 	bool check_optionrom = false;
   14040 	bool have_build = false;
   14041 	bool have_uid = true;
   14042 
   14043 	/*
   14044 	 * Version format:
   14045 	 *
   14046 	 * XYYZ
   14047 	 * X0YZ
   14048 	 * X0YY
   14049 	 *
   14050 	 * Example:
   14051 	 *
   14052 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14053 	 *	82571	0x50a6	5.10.6?
   14054 	 *	82572	0x506a	5.6.10?
   14055 	 *	82572EI	0x5069	5.6.9?
   14056 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14057 	 *		0x2013	2.1.3?
   14058 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14059 	 * ICH8+82567	0x0040	0.4.0?
   14060 	 * ICH9+82566	0x1040	1.4.0?
   14061 	 *ICH10+82567	0x0043	0.4.3?
   14062 	 *  PCH+82577	0x00c1	0.12.1?
   14063 	 * PCH2+82579	0x00d3	0.13.3?
   14064 	 *		0x00d4	0.13.4?
   14065 	 *  LPT+I218	0x0023	0.2.3?
   14066 	 *  SPT+I219	0x0084	0.8.4?
   14067 	 *  CNP+I219	0x0054	0.5.4?
   14068 	 */
   14069 
   14070 	/*
   14071 	 * XXX
   14072 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   14073 	 * I've never seen on real 82574 hardware with such small SPI ROM.
   14074 	 */
   14075 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14076 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14077 		have_uid = false;
   14078 
   14079 	switch (sc->sc_type) {
   14080 	case WM_T_82571:
   14081 	case WM_T_82572:
   14082 	case WM_T_82574:
   14083 	case WM_T_82583:
   14084 		check_version = true;
   14085 		check_optionrom = true;
   14086 		have_build = true;
   14087 		break;
   14088 	case WM_T_ICH8:
   14089 	case WM_T_ICH9:
   14090 	case WM_T_ICH10:
   14091 	case WM_T_PCH:
   14092 	case WM_T_PCH2:
   14093 	case WM_T_PCH_LPT:
   14094 	case WM_T_PCH_SPT:
   14095 	case WM_T_PCH_CNP:
   14096 		check_version = true;
   14097 		have_build = true;
   14098 		have_uid = false;
   14099 		break;
   14100 	case WM_T_82575:
   14101 	case WM_T_82576:
   14102 	case WM_T_82580:
   14103 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14104 			check_version = true;
   14105 		break;
   14106 	case WM_T_I211:
   14107 		wm_nvm_version_invm(sc);
   14108 		have_uid = false;
   14109 		goto printver;
   14110 	case WM_T_I210:
   14111 		if (!wm_nvm_flash_presence_i210(sc)) {
   14112 			wm_nvm_version_invm(sc);
   14113 			have_uid = false;
   14114 			goto printver;
   14115 		}
   14116 		/* FALLTHROUGH */
   14117 	case WM_T_I350:
   14118 	case WM_T_I354:
   14119 		check_version = true;
   14120 		check_optionrom = true;
   14121 		break;
   14122 	default:
   14123 		return;
   14124 	}
   14125 	if (check_version
   14126 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14127 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14128 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14129 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14130 			build = nvm_data & NVM_BUILD_MASK;
   14131 			have_build = true;
   14132 		} else
   14133 			minor = nvm_data & 0x00ff;
   14134 
   14135 		/* Decimal */
   14136 		minor = (minor / 16) * 10 + (minor % 16);
   14137 		sc->sc_nvm_ver_major = major;
   14138 		sc->sc_nvm_ver_minor = minor;
   14139 
   14140 printver:
   14141 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14142 		    sc->sc_nvm_ver_minor);
   14143 		if (have_build) {
   14144 			sc->sc_nvm_ver_build = build;
   14145 			aprint_verbose(".%d", build);
   14146 		}
   14147 	}
   14148 
   14149 	/* Assume the Option ROM area is at avove NVM_SIZE */
   14150 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14151 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14152 		/* Option ROM Version */
   14153 		if ((off != 0x0000) && (off != 0xffff)) {
   14154 			int rv;
   14155 
   14156 			off += NVM_COMBO_VER_OFF;
   14157 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14158 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14159 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14160 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14161 				/* 16bits */
   14162 				major = uid0 >> 8;
   14163 				build = (uid0 << 8) | (uid1 >> 8);
   14164 				patch = uid1 & 0x00ff;
   14165 				aprint_verbose(", option ROM Version %d.%d.%d",
   14166 				    major, build, patch);
   14167 			}
   14168 		}
   14169 	}
   14170 
   14171 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14172 		aprint_verbose(", Image Unique ID %08x",
   14173 		    ((uint32_t)uid1 << 16) | uid0);
   14174 }
   14175 
   14176 /*
   14177  * wm_nvm_read:
   14178  *
   14179  *	Read data from the serial EEPROM.
   14180  */
   14181 static int
   14182 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14183 {
   14184 	int rv;
   14185 
   14186 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14187 		device_xname(sc->sc_dev), __func__));
   14188 
   14189 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14190 		return -1;
   14191 
   14192 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14193 
   14194 	return rv;
   14195 }
   14196 
   14197 /*
   14198  * Hardware semaphores.
   14199  * Very complexed...
   14200  */
   14201 
   14202 static int
   14203 wm_get_null(struct wm_softc *sc)
   14204 {
   14205 
   14206 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14207 		device_xname(sc->sc_dev), __func__));
   14208 	return 0;
   14209 }
   14210 
   14211 static void
   14212 wm_put_null(struct wm_softc *sc)
   14213 {
   14214 
   14215 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14216 		device_xname(sc->sc_dev), __func__));
   14217 	return;
   14218 }
   14219 
   14220 static int
   14221 wm_get_eecd(struct wm_softc *sc)
   14222 {
   14223 	uint32_t reg;
   14224 	int x;
   14225 
   14226 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14227 		device_xname(sc->sc_dev), __func__));
   14228 
   14229 	reg = CSR_READ(sc, WMREG_EECD);
   14230 
   14231 	/* Request EEPROM access. */
   14232 	reg |= EECD_EE_REQ;
   14233 	CSR_WRITE(sc, WMREG_EECD, reg);
   14234 
   14235 	/* ..and wait for it to be granted. */
   14236 	for (x = 0; x < 1000; x++) {
   14237 		reg = CSR_READ(sc, WMREG_EECD);
   14238 		if (reg & EECD_EE_GNT)
   14239 			break;
   14240 		delay(5);
   14241 	}
   14242 	if ((reg & EECD_EE_GNT) == 0) {
   14243 		aprint_error_dev(sc->sc_dev,
   14244 		    "could not acquire EEPROM GNT\n");
   14245 		reg &= ~EECD_EE_REQ;
   14246 		CSR_WRITE(sc, WMREG_EECD, reg);
   14247 		return -1;
   14248 	}
   14249 
   14250 	return 0;
   14251 }
   14252 
   14253 static void
   14254 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14255 {
   14256 
   14257 	*eecd |= EECD_SK;
   14258 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14259 	CSR_WRITE_FLUSH(sc);
   14260 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14261 		delay(1);
   14262 	else
   14263 		delay(50);
   14264 }
   14265 
   14266 static void
   14267 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14268 {
   14269 
   14270 	*eecd &= ~EECD_SK;
   14271 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14272 	CSR_WRITE_FLUSH(sc);
   14273 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14274 		delay(1);
   14275 	else
   14276 		delay(50);
   14277 }
   14278 
   14279 static void
   14280 wm_put_eecd(struct wm_softc *sc)
   14281 {
   14282 	uint32_t reg;
   14283 
   14284 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14285 		device_xname(sc->sc_dev), __func__));
   14286 
   14287 	/* Stop nvm */
   14288 	reg = CSR_READ(sc, WMREG_EECD);
   14289 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14290 		/* Pull CS high */
   14291 		reg |= EECD_CS;
   14292 		wm_nvm_eec_clock_lower(sc, &reg);
   14293 	} else {
   14294 		/* CS on Microwire is active-high */
   14295 		reg &= ~(EECD_CS | EECD_DI);
   14296 		CSR_WRITE(sc, WMREG_EECD, reg);
   14297 		wm_nvm_eec_clock_raise(sc, &reg);
   14298 		wm_nvm_eec_clock_lower(sc, &reg);
   14299 	}
   14300 
   14301 	reg = CSR_READ(sc, WMREG_EECD);
   14302 	reg &= ~EECD_EE_REQ;
   14303 	CSR_WRITE(sc, WMREG_EECD, reg);
   14304 
   14305 	return;
   14306 }
   14307 
   14308 /*
   14309  * Get hardware semaphore.
   14310  * Same as e1000_get_hw_semaphore_generic()
   14311  */
   14312 static int
   14313 wm_get_swsm_semaphore(struct wm_softc *sc)
   14314 {
   14315 	int32_t timeout;
   14316 	uint32_t swsm;
   14317 
   14318 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14319 		device_xname(sc->sc_dev), __func__));
   14320 	KASSERT(sc->sc_nvm_wordsize > 0);
   14321 
   14322 retry:
   14323 	/* Get the SW semaphore. */
   14324 	timeout = sc->sc_nvm_wordsize + 1;
   14325 	while (timeout) {
   14326 		swsm = CSR_READ(sc, WMREG_SWSM);
   14327 
   14328 		if ((swsm & SWSM_SMBI) == 0)
   14329 			break;
   14330 
   14331 		delay(50);
   14332 		timeout--;
   14333 	}
   14334 
   14335 	if (timeout == 0) {
   14336 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14337 			/*
   14338 			 * In rare circumstances, the SW semaphore may already
   14339 			 * be held unintentionally. Clear the semaphore once
   14340 			 * before giving up.
   14341 			 */
   14342 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14343 			wm_put_swsm_semaphore(sc);
   14344 			goto retry;
   14345 		}
   14346 		aprint_error_dev(sc->sc_dev,
   14347 		    "could not acquire SWSM SMBI\n");
   14348 		return 1;
   14349 	}
   14350 
   14351 	/* Get the FW semaphore. */
   14352 	timeout = sc->sc_nvm_wordsize + 1;
   14353 	while (timeout) {
   14354 		swsm = CSR_READ(sc, WMREG_SWSM);
   14355 		swsm |= SWSM_SWESMBI;
   14356 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14357 		/* If we managed to set the bit we got the semaphore. */
   14358 		swsm = CSR_READ(sc, WMREG_SWSM);
   14359 		if (swsm & SWSM_SWESMBI)
   14360 			break;
   14361 
   14362 		delay(50);
   14363 		timeout--;
   14364 	}
   14365 
   14366 	if (timeout == 0) {
   14367 		aprint_error_dev(sc->sc_dev,
   14368 		    "could not acquire SWSM SWESMBI\n");
   14369 		/* Release semaphores */
   14370 		wm_put_swsm_semaphore(sc);
   14371 		return 1;
   14372 	}
   14373 	return 0;
   14374 }
   14375 
   14376 /*
   14377  * Put hardware semaphore.
   14378  * Same as e1000_put_hw_semaphore_generic()
   14379  */
   14380 static void
   14381 wm_put_swsm_semaphore(struct wm_softc *sc)
   14382 {
   14383 	uint32_t swsm;
   14384 
   14385 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14386 		device_xname(sc->sc_dev), __func__));
   14387 
   14388 	swsm = CSR_READ(sc, WMREG_SWSM);
   14389 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14390 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14391 }
   14392 
   14393 /*
   14394  * Get SW/FW semaphore.
   14395  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14396  */
   14397 static int
   14398 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14399 {
   14400 	uint32_t swfw_sync;
   14401 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14402 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14403 	int timeout;
   14404 
   14405 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14406 		device_xname(sc->sc_dev), __func__));
   14407 
   14408 	if (sc->sc_type == WM_T_80003)
   14409 		timeout = 50;
   14410 	else
   14411 		timeout = 200;
   14412 
   14413 	while (timeout) {
   14414 		if (wm_get_swsm_semaphore(sc)) {
   14415 			aprint_error_dev(sc->sc_dev,
   14416 			    "%s: failed to get semaphore\n",
   14417 			    __func__);
   14418 			return 1;
   14419 		}
   14420 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14421 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14422 			swfw_sync |= swmask;
   14423 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14424 			wm_put_swsm_semaphore(sc);
   14425 			return 0;
   14426 		}
   14427 		wm_put_swsm_semaphore(sc);
   14428 		delay(5000);
   14429 		timeout--;
   14430 	}
   14431 	device_printf(sc->sc_dev,
   14432 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14433 	    mask, swfw_sync);
   14434 	return 1;
   14435 }
   14436 
   14437 static void
   14438 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14439 {
   14440 	uint32_t swfw_sync;
   14441 
   14442 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14443 		device_xname(sc->sc_dev), __func__));
   14444 
   14445 	while (wm_get_swsm_semaphore(sc) != 0)
   14446 		continue;
   14447 
   14448 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14449 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14450 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14451 
   14452 	wm_put_swsm_semaphore(sc);
   14453 }
   14454 
   14455 static int
   14456 wm_get_nvm_80003(struct wm_softc *sc)
   14457 {
   14458 	int rv;
   14459 
   14460 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14461 		device_xname(sc->sc_dev), __func__));
   14462 
   14463 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14464 		aprint_error_dev(sc->sc_dev,
   14465 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14466 		return rv;
   14467 	}
   14468 
   14469 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14470 	    && (rv = wm_get_eecd(sc)) != 0) {
   14471 		aprint_error_dev(sc->sc_dev,
   14472 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14473 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14474 		return rv;
   14475 	}
   14476 
   14477 	return 0;
   14478 }
   14479 
   14480 static void
   14481 wm_put_nvm_80003(struct wm_softc *sc)
   14482 {
   14483 
   14484 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14485 		device_xname(sc->sc_dev), __func__));
   14486 
   14487 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14488 		wm_put_eecd(sc);
   14489 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14490 }
   14491 
   14492 static int
   14493 wm_get_nvm_82571(struct wm_softc *sc)
   14494 {
   14495 	int rv;
   14496 
   14497 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14498 		device_xname(sc->sc_dev), __func__));
   14499 
   14500 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14501 		return rv;
   14502 
   14503 	switch (sc->sc_type) {
   14504 	case WM_T_82573:
   14505 		break;
   14506 	default:
   14507 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14508 			rv = wm_get_eecd(sc);
   14509 		break;
   14510 	}
   14511 
   14512 	if (rv != 0) {
   14513 		aprint_error_dev(sc->sc_dev,
   14514 		    "%s: failed to get semaphore\n",
   14515 		    __func__);
   14516 		wm_put_swsm_semaphore(sc);
   14517 	}
   14518 
   14519 	return rv;
   14520 }
   14521 
   14522 static void
   14523 wm_put_nvm_82571(struct wm_softc *sc)
   14524 {
   14525 
   14526 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14527 		device_xname(sc->sc_dev), __func__));
   14528 
   14529 	switch (sc->sc_type) {
   14530 	case WM_T_82573:
   14531 		break;
   14532 	default:
   14533 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14534 			wm_put_eecd(sc);
   14535 		break;
   14536 	}
   14537 
   14538 	wm_put_swsm_semaphore(sc);
   14539 }
   14540 
   14541 static int
   14542 wm_get_phy_82575(struct wm_softc *sc)
   14543 {
   14544 
   14545 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14546 		device_xname(sc->sc_dev), __func__));
   14547 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14548 }
   14549 
   14550 static void
   14551 wm_put_phy_82575(struct wm_softc *sc)
   14552 {
   14553 
   14554 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14555 		device_xname(sc->sc_dev), __func__));
   14556 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14557 }
   14558 
   14559 static int
   14560 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14561 {
   14562 	uint32_t ext_ctrl;
   14563 	int timeout = 200;
   14564 
   14565 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14566 		device_xname(sc->sc_dev), __func__));
   14567 
   14568 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14569 	for (timeout = 0; timeout < 200; timeout++) {
   14570 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14571 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14572 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14573 
   14574 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14575 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14576 			return 0;
   14577 		delay(5000);
   14578 	}
   14579 	device_printf(sc->sc_dev,
   14580 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14581 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14582 	return 1;
   14583 }
   14584 
   14585 static void
   14586 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14587 {
   14588 	uint32_t ext_ctrl;
   14589 
   14590 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14591 		device_xname(sc->sc_dev), __func__));
   14592 
   14593 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14594 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14595 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14596 
   14597 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14598 }
   14599 
   14600 static int
   14601 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14602 {
   14603 	uint32_t ext_ctrl;
   14604 	int timeout;
   14605 
   14606 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14607 		device_xname(sc->sc_dev), __func__));
   14608 	mutex_enter(sc->sc_ich_phymtx);
   14609 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14610 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14611 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14612 			break;
   14613 		delay(1000);
   14614 	}
   14615 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14616 		device_printf(sc->sc_dev,
   14617 		    "SW has already locked the resource\n");
   14618 		goto out;
   14619 	}
   14620 
   14621 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14622 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14623 	for (timeout = 0; timeout < 1000; timeout++) {
   14624 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14625 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14626 			break;
   14627 		delay(1000);
   14628 	}
   14629 	if (timeout >= 1000) {
   14630 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14631 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14632 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14633 		goto out;
   14634 	}
   14635 	return 0;
   14636 
   14637 out:
   14638 	mutex_exit(sc->sc_ich_phymtx);
   14639 	return 1;
   14640 }
   14641 
   14642 static void
   14643 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14644 {
   14645 	uint32_t ext_ctrl;
   14646 
   14647 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14648 		device_xname(sc->sc_dev), __func__));
   14649 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14650 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14651 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14652 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14653 	} else {
   14654 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14655 	}
   14656 
   14657 	mutex_exit(sc->sc_ich_phymtx);
   14658 }
   14659 
   14660 static int
   14661 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14662 {
   14663 
   14664 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14665 		device_xname(sc->sc_dev), __func__));
   14666 	mutex_enter(sc->sc_ich_nvmmtx);
   14667 
   14668 	return 0;
   14669 }
   14670 
   14671 static void
   14672 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14673 {
   14674 
   14675 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14676 		device_xname(sc->sc_dev), __func__));
   14677 	mutex_exit(sc->sc_ich_nvmmtx);
   14678 }
   14679 
   14680 static int
   14681 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14682 {
   14683 	int i = 0;
   14684 	uint32_t reg;
   14685 
   14686 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14687 		device_xname(sc->sc_dev), __func__));
   14688 
   14689 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14690 	do {
   14691 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14692 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14693 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14694 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14695 			break;
   14696 		delay(2*1000);
   14697 		i++;
   14698 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14699 
   14700 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14701 		wm_put_hw_semaphore_82573(sc);
   14702 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14703 		    device_xname(sc->sc_dev));
   14704 		return -1;
   14705 	}
   14706 
   14707 	return 0;
   14708 }
   14709 
   14710 static void
   14711 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14712 {
   14713 	uint32_t reg;
   14714 
   14715 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14716 		device_xname(sc->sc_dev), __func__));
   14717 
   14718 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14719 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14720 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14721 }
   14722 
   14723 /*
   14724  * Management mode and power management related subroutines.
   14725  * BMC, AMT, suspend/resume and EEE.
   14726  */
   14727 
   14728 #ifdef WM_WOL
   14729 static int
   14730 wm_check_mng_mode(struct wm_softc *sc)
   14731 {
   14732 	int rv;
   14733 
   14734 	switch (sc->sc_type) {
   14735 	case WM_T_ICH8:
   14736 	case WM_T_ICH9:
   14737 	case WM_T_ICH10:
   14738 	case WM_T_PCH:
   14739 	case WM_T_PCH2:
   14740 	case WM_T_PCH_LPT:
   14741 	case WM_T_PCH_SPT:
   14742 	case WM_T_PCH_CNP:
   14743 		rv = wm_check_mng_mode_ich8lan(sc);
   14744 		break;
   14745 	case WM_T_82574:
   14746 	case WM_T_82583:
   14747 		rv = wm_check_mng_mode_82574(sc);
   14748 		break;
   14749 	case WM_T_82571:
   14750 	case WM_T_82572:
   14751 	case WM_T_82573:
   14752 	case WM_T_80003:
   14753 		rv = wm_check_mng_mode_generic(sc);
   14754 		break;
   14755 	default:
   14756 		/* Noting to do */
   14757 		rv = 0;
   14758 		break;
   14759 	}
   14760 
   14761 	return rv;
   14762 }
   14763 
   14764 static int
   14765 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14766 {
   14767 	uint32_t fwsm;
   14768 
   14769 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14770 
   14771 	if (((fwsm & FWSM_FW_VALID) != 0)
   14772 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14773 		return 1;
   14774 
   14775 	return 0;
   14776 }
   14777 
   14778 static int
   14779 wm_check_mng_mode_82574(struct wm_softc *sc)
   14780 {
   14781 	uint16_t data;
   14782 
   14783 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14784 
   14785 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14786 		return 1;
   14787 
   14788 	return 0;
   14789 }
   14790 
   14791 static int
   14792 wm_check_mng_mode_generic(struct wm_softc *sc)
   14793 {
   14794 	uint32_t fwsm;
   14795 
   14796 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14797 
   14798 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14799 		return 1;
   14800 
   14801 	return 0;
   14802 }
   14803 #endif /* WM_WOL */
   14804 
   14805 static int
   14806 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14807 {
   14808 	uint32_t manc, fwsm, factps;
   14809 
   14810 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14811 		return 0;
   14812 
   14813 	manc = CSR_READ(sc, WMREG_MANC);
   14814 
   14815 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14816 		device_xname(sc->sc_dev), manc));
   14817 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14818 		return 0;
   14819 
   14820 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14821 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14822 		factps = CSR_READ(sc, WMREG_FACTPS);
   14823 		if (((factps & FACTPS_MNGCG) == 0)
   14824 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14825 			return 1;
   14826 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14827 		uint16_t data;
   14828 
   14829 		factps = CSR_READ(sc, WMREG_FACTPS);
   14830 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14831 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14832 			device_xname(sc->sc_dev), factps, data));
   14833 		if (((factps & FACTPS_MNGCG) == 0)
   14834 		    && ((data & NVM_CFG2_MNGM_MASK)
   14835 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14836 			return 1;
   14837 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14838 	    && ((manc & MANC_ASF_EN) == 0))
   14839 		return 1;
   14840 
   14841 	return 0;
   14842 }
   14843 
   14844 static bool
   14845 wm_phy_resetisblocked(struct wm_softc *sc)
   14846 {
   14847 	bool blocked = false;
   14848 	uint32_t reg;
   14849 	int i = 0;
   14850 
   14851 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14852 		device_xname(sc->sc_dev), __func__));
   14853 
   14854 	switch (sc->sc_type) {
   14855 	case WM_T_ICH8:
   14856 	case WM_T_ICH9:
   14857 	case WM_T_ICH10:
   14858 	case WM_T_PCH:
   14859 	case WM_T_PCH2:
   14860 	case WM_T_PCH_LPT:
   14861 	case WM_T_PCH_SPT:
   14862 	case WM_T_PCH_CNP:
   14863 		do {
   14864 			reg = CSR_READ(sc, WMREG_FWSM);
   14865 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14866 				blocked = true;
   14867 				delay(10*1000);
   14868 				continue;
   14869 			}
   14870 			blocked = false;
   14871 		} while (blocked && (i++ < 30));
   14872 		return blocked;
   14873 		break;
   14874 	case WM_T_82571:
   14875 	case WM_T_82572:
   14876 	case WM_T_82573:
   14877 	case WM_T_82574:
   14878 	case WM_T_82583:
   14879 	case WM_T_80003:
   14880 		reg = CSR_READ(sc, WMREG_MANC);
   14881 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14882 			return true;
   14883 		else
   14884 			return false;
   14885 		break;
   14886 	default:
   14887 		/* No problem */
   14888 		break;
   14889 	}
   14890 
   14891 	return false;
   14892 }
   14893 
   14894 static void
   14895 wm_get_hw_control(struct wm_softc *sc)
   14896 {
   14897 	uint32_t reg;
   14898 
   14899 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14900 		device_xname(sc->sc_dev), __func__));
   14901 
   14902 	if (sc->sc_type == WM_T_82573) {
   14903 		reg = CSR_READ(sc, WMREG_SWSM);
   14904 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14905 	} else if (sc->sc_type >= WM_T_82571) {
   14906 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14907 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14908 	}
   14909 }
   14910 
   14911 static void
   14912 wm_release_hw_control(struct wm_softc *sc)
   14913 {
   14914 	uint32_t reg;
   14915 
   14916 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14917 		device_xname(sc->sc_dev), __func__));
   14918 
   14919 	if (sc->sc_type == WM_T_82573) {
   14920 		reg = CSR_READ(sc, WMREG_SWSM);
   14921 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14922 	} else if (sc->sc_type >= WM_T_82571) {
   14923 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14924 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14925 	}
   14926 }
   14927 
   14928 static void
   14929 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14930 {
   14931 	uint32_t reg;
   14932 
   14933 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14934 		device_xname(sc->sc_dev), __func__));
   14935 
   14936 	if (sc->sc_type < WM_T_PCH2)
   14937 		return;
   14938 
   14939 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14940 
   14941 	if (gate)
   14942 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14943 	else
   14944 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14945 
   14946 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14947 }
   14948 
   14949 static int
   14950 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14951 {
   14952 	uint32_t fwsm, reg;
   14953 	int rv = 0;
   14954 
   14955 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14956 		device_xname(sc->sc_dev), __func__));
   14957 
   14958 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14959 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14960 
   14961 	/* Disable ULP */
   14962 	wm_ulp_disable(sc);
   14963 
   14964 	/* Acquire PHY semaphore */
   14965 	rv = sc->phy.acquire(sc);
   14966 	if (rv != 0) {
   14967 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   14968 		device_xname(sc->sc_dev), __func__));
   14969 		return -1;
   14970 	}
   14971 
   14972 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14973 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14974 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14975 	 */
   14976 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14977 	switch (sc->sc_type) {
   14978 	case WM_T_PCH_LPT:
   14979 	case WM_T_PCH_SPT:
   14980 	case WM_T_PCH_CNP:
   14981 		if (wm_phy_is_accessible_pchlan(sc))
   14982 			break;
   14983 
   14984 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14985 		 * forcing MAC to SMBus mode first.
   14986 		 */
   14987 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14988 		reg |= CTRL_EXT_FORCE_SMBUS;
   14989 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14990 #if 0
   14991 		/* XXX Isn't this required??? */
   14992 		CSR_WRITE_FLUSH(sc);
   14993 #endif
   14994 		/* Wait 50 milliseconds for MAC to finish any retries
   14995 		 * that it might be trying to perform from previous
   14996 		 * attempts to acknowledge any phy read requests.
   14997 		 */
   14998 		delay(50 * 1000);
   14999 		/* FALLTHROUGH */
   15000 	case WM_T_PCH2:
   15001 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15002 			break;
   15003 		/* FALLTHROUGH */
   15004 	case WM_T_PCH:
   15005 		if (sc->sc_type == WM_T_PCH)
   15006 			if ((fwsm & FWSM_FW_VALID) != 0)
   15007 				break;
   15008 
   15009 		if (wm_phy_resetisblocked(sc) == true) {
   15010 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15011 			break;
   15012 		}
   15013 
   15014 		/* Toggle LANPHYPC Value bit */
   15015 		wm_toggle_lanphypc_pch_lpt(sc);
   15016 
   15017 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15018 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15019 				break;
   15020 
   15021 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15022 			 * so ensure that the MAC is also out of SMBus mode
   15023 			 */
   15024 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15025 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15026 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15027 
   15028 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15029 				break;
   15030 			rv = -1;
   15031 		}
   15032 		break;
   15033 	default:
   15034 		break;
   15035 	}
   15036 
   15037 	/* Release semaphore */
   15038 	sc->phy.release(sc);
   15039 
   15040 	if (rv == 0) {
   15041 		/* Check to see if able to reset PHY.  Print error if not */
   15042 		if (wm_phy_resetisblocked(sc)) {
   15043 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15044 			goto out;
   15045 		}
   15046 
   15047 		/* Reset the PHY before any access to it.  Doing so, ensures
   15048 		 * that the PHY is in a known good state before we read/write
   15049 		 * PHY registers.  The generic reset is sufficient here,
   15050 		 * because we haven't determined the PHY type yet.
   15051 		 */
   15052 		if (wm_reset_phy(sc) != 0)
   15053 			goto out;
   15054 
   15055 		/* On a successful reset, possibly need to wait for the PHY
   15056 		 * to quiesce to an accessible state before returning control
   15057 		 * to the calling function.  If the PHY does not quiesce, then
   15058 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
   15059 		 *  the PHY is in.
   15060 		 */
   15061 		if (wm_phy_resetisblocked(sc))
   15062 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15063 	}
   15064 
   15065 out:
   15066 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15067 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15068 		delay(10*1000);
   15069 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15070 	}
   15071 
   15072 	return 0;
   15073 }
   15074 
   15075 static void
   15076 wm_init_manageability(struct wm_softc *sc)
   15077 {
   15078 
   15079 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15080 		device_xname(sc->sc_dev), __func__));
   15081 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15082 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15083 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15084 
   15085 		/* Disable hardware interception of ARP */
   15086 		manc &= ~MANC_ARP_EN;
   15087 
   15088 		/* Enable receiving management packets to the host */
   15089 		if (sc->sc_type >= WM_T_82571) {
   15090 			manc |= MANC_EN_MNG2HOST;
   15091 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15092 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15093 		}
   15094 
   15095 		CSR_WRITE(sc, WMREG_MANC, manc);
   15096 	}
   15097 }
   15098 
   15099 static void
   15100 wm_release_manageability(struct wm_softc *sc)
   15101 {
   15102 
   15103 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15104 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15105 
   15106 		manc |= MANC_ARP_EN;
   15107 		if (sc->sc_type >= WM_T_82571)
   15108 			manc &= ~MANC_EN_MNG2HOST;
   15109 
   15110 		CSR_WRITE(sc, WMREG_MANC, manc);
   15111 	}
   15112 }
   15113 
   15114 static void
   15115 wm_get_wakeup(struct wm_softc *sc)
   15116 {
   15117 
   15118 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15119 	switch (sc->sc_type) {
   15120 	case WM_T_82573:
   15121 	case WM_T_82583:
   15122 		sc->sc_flags |= WM_F_HAS_AMT;
   15123 		/* FALLTHROUGH */
   15124 	case WM_T_80003:
   15125 	case WM_T_82575:
   15126 	case WM_T_82576:
   15127 	case WM_T_82580:
   15128 	case WM_T_I350:
   15129 	case WM_T_I354:
   15130 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15131 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15132 		/* FALLTHROUGH */
   15133 	case WM_T_82541:
   15134 	case WM_T_82541_2:
   15135 	case WM_T_82547:
   15136 	case WM_T_82547_2:
   15137 	case WM_T_82571:
   15138 	case WM_T_82572:
   15139 	case WM_T_82574:
   15140 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15141 		break;
   15142 	case WM_T_ICH8:
   15143 	case WM_T_ICH9:
   15144 	case WM_T_ICH10:
   15145 	case WM_T_PCH:
   15146 	case WM_T_PCH2:
   15147 	case WM_T_PCH_LPT:
   15148 	case WM_T_PCH_SPT:
   15149 	case WM_T_PCH_CNP:
   15150 		sc->sc_flags |= WM_F_HAS_AMT;
   15151 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15152 		break;
   15153 	default:
   15154 		break;
   15155 	}
   15156 
   15157 	/* 1: HAS_MANAGE */
   15158 	if (wm_enable_mng_pass_thru(sc) != 0)
   15159 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15160 
   15161 	/*
   15162 	 * Note that the WOL flags is set after the resetting of the eeprom
   15163 	 * stuff
   15164 	 */
   15165 }
   15166 
   15167 /*
   15168  * Unconfigure Ultra Low Power mode.
   15169  * Only for I217 and newer (see below).
   15170  */
   15171 static int
   15172 wm_ulp_disable(struct wm_softc *sc)
   15173 {
   15174 	uint32_t reg;
   15175 	uint16_t phyreg;
   15176 	int i = 0, rv = 0;
   15177 
   15178 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15179 		device_xname(sc->sc_dev), __func__));
   15180 	/* Exclude old devices */
   15181 	if ((sc->sc_type < WM_T_PCH_LPT)
   15182 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15183 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15184 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15185 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15186 		return 0;
   15187 
   15188 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15189 		/* Request ME un-configure ULP mode in the PHY */
   15190 		reg = CSR_READ(sc, WMREG_H2ME);
   15191 		reg &= ~H2ME_ULP;
   15192 		reg |= H2ME_ENFORCE_SETTINGS;
   15193 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15194 
   15195 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15196 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15197 			if (i++ == 30) {
   15198 				device_printf(sc->sc_dev, "%s timed out\n",
   15199 				    __func__);
   15200 				return -1;
   15201 			}
   15202 			delay(10 * 1000);
   15203 		}
   15204 		reg = CSR_READ(sc, WMREG_H2ME);
   15205 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15206 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15207 
   15208 		return 0;
   15209 	}
   15210 
   15211 	/* Acquire semaphore */
   15212 	rv = sc->phy.acquire(sc);
   15213 	if (rv != 0) {
   15214 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15215 		device_xname(sc->sc_dev), __func__));
   15216 		return -1;
   15217 	}
   15218 
   15219 	/* Toggle LANPHYPC */
   15220 	wm_toggle_lanphypc_pch_lpt(sc);
   15221 
   15222 	/* Unforce SMBus mode in PHY */
   15223 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15224 	if (rv != 0) {
   15225 		uint32_t reg2;
   15226 
   15227 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15228 			__func__);
   15229 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15230 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15231 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15232 		delay(50 * 1000);
   15233 
   15234 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15235 		    &phyreg);
   15236 		if (rv != 0)
   15237 			goto release;
   15238 	}
   15239 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15240 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15241 
   15242 	/* Unforce SMBus mode in MAC */
   15243 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15244 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15245 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15246 
   15247 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15248 	if (rv != 0)
   15249 		goto release;
   15250 	phyreg |= HV_PM_CTRL_K1_ENA;
   15251 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15252 
   15253 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15254 		&phyreg);
   15255 	if (rv != 0)
   15256 		goto release;
   15257 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15258 	    | I218_ULP_CONFIG1_STICKY_ULP
   15259 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15260 	    | I218_ULP_CONFIG1_WOL_HOST
   15261 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15262 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15263 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15264 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15265 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15266 	phyreg |= I218_ULP_CONFIG1_START;
   15267 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15268 
   15269 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15270 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15271 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15272 
   15273 release:
   15274 	/* Release semaphore */
   15275 	sc->phy.release(sc);
   15276 	wm_gmii_reset(sc);
   15277 	delay(50 * 1000);
   15278 
   15279 	return rv;
   15280 }
   15281 
   15282 /* WOL in the newer chipset interfaces (pchlan) */
   15283 static int
   15284 wm_enable_phy_wakeup(struct wm_softc *sc)
   15285 {
   15286 	device_t dev = sc->sc_dev;
   15287 	uint32_t mreg, moff;
   15288 	uint16_t wuce, wuc, wufc, preg;
   15289 	int i, rv;
   15290 
   15291 	KASSERT(sc->sc_type >= WM_T_PCH);
   15292 
   15293 	/* Copy MAC RARs to PHY RARs */
   15294 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15295 
   15296 	/* Activate PHY wakeup */
   15297 	rv = sc->phy.acquire(sc);
   15298 	if (rv != 0) {
   15299 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15300 		    __func__);
   15301 		return rv;
   15302 	}
   15303 
   15304 	/*
   15305 	 * Enable access to PHY wakeup registers.
   15306 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15307 	 */
   15308 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15309 	if (rv != 0) {
   15310 		device_printf(dev,
   15311 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15312 		goto release;
   15313 	}
   15314 
   15315 	/* Copy MAC MTA to PHY MTA */
   15316 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15317 		uint16_t lo, hi;
   15318 
   15319 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15320 		lo = (uint16_t)(mreg & 0xffff);
   15321 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15322 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15323 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15324 	}
   15325 
   15326 	/* Configure PHY Rx Control register */
   15327 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15328 	mreg = CSR_READ(sc, WMREG_RCTL);
   15329 	if (mreg & RCTL_UPE)
   15330 		preg |= BM_RCTL_UPE;
   15331 	if (mreg & RCTL_MPE)
   15332 		preg |= BM_RCTL_MPE;
   15333 	preg &= ~(BM_RCTL_MO_MASK);
   15334 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15335 	if (moff != 0)
   15336 		preg |= moff << BM_RCTL_MO_SHIFT;
   15337 	if (mreg & RCTL_BAM)
   15338 		preg |= BM_RCTL_BAM;
   15339 	if (mreg & RCTL_PMCF)
   15340 		preg |= BM_RCTL_PMCF;
   15341 	mreg = CSR_READ(sc, WMREG_CTRL);
   15342 	if (mreg & CTRL_RFCE)
   15343 		preg |= BM_RCTL_RFCE;
   15344 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15345 
   15346 	wuc = WUC_APME | WUC_PME_EN;
   15347 	wufc = WUFC_MAG;
   15348 	/* Enable PHY wakeup in MAC register */
   15349 	CSR_WRITE(sc, WMREG_WUC,
   15350 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15351 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15352 
   15353 	/* Configure and enable PHY wakeup in PHY registers */
   15354 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15355 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15356 
   15357 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15358 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15359 
   15360 release:
   15361 	sc->phy.release(sc);
   15362 
   15363 	return 0;
   15364 }
   15365 
   15366 /* Power down workaround on D3 */
   15367 static void
   15368 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15369 {
   15370 	uint32_t reg;
   15371 	uint16_t phyreg;
   15372 	int i;
   15373 
   15374 	for (i = 0; i < 2; i++) {
   15375 		/* Disable link */
   15376 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15377 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15378 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15379 
   15380 		/*
   15381 		 * Call gig speed drop workaround on Gig disable before
   15382 		 * accessing any PHY registers
   15383 		 */
   15384 		if (sc->sc_type == WM_T_ICH8)
   15385 			wm_gig_downshift_workaround_ich8lan(sc);
   15386 
   15387 		/* Write VR power-down enable */
   15388 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15389 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15390 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15391 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15392 
   15393 		/* Read it back and test */
   15394 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15395 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15396 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15397 			break;
   15398 
   15399 		/* Issue PHY reset and repeat at most one more time */
   15400 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15401 	}
   15402 }
   15403 
   15404 /*
   15405  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15406  *  @sc: pointer to the HW structure
   15407  *
   15408  *  During S0 to Sx transition, it is possible the link remains at gig
   15409  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15410  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15411  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15412  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15413  *  needs to be written.
   15414  *  Parts that support (and are linked to a partner which support) EEE in
   15415  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15416  *  than 10Mbps w/o EEE.
   15417  */
   15418 static void
   15419 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15420 {
   15421 	device_t dev = sc->sc_dev;
   15422 	struct ethercom *ec = &sc->sc_ethercom;
   15423 	uint32_t phy_ctrl;
   15424 	int rv;
   15425 
   15426 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15427 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15428 
   15429 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15430 
   15431 	if (sc->sc_phytype == WMPHY_I217) {
   15432 		uint16_t devid = sc->sc_pcidevid;
   15433 
   15434 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15435 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15436 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15437 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15438 		    (sc->sc_type >= WM_T_PCH_SPT))
   15439 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15440 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15441 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15442 
   15443 		if (sc->phy.acquire(sc) != 0)
   15444 			goto out;
   15445 
   15446 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15447 			uint16_t eee_advert;
   15448 
   15449 			rv = wm_read_emi_reg_locked(dev,
   15450 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15451 			if (rv)
   15452 				goto release;
   15453 
   15454 			/*
   15455 			 * Disable LPLU if both link partners support 100BaseT
   15456 			 * EEE and 100Full is advertised on both ends of the
   15457 			 * link, and enable Auto Enable LPI since there will
   15458 			 * be no driver to enable LPI while in Sx.
   15459 			 */
   15460 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15461 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15462 				uint16_t anar, phy_reg;
   15463 
   15464 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15465 				    &anar);
   15466 				if (anar & ANAR_TX_FD) {
   15467 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15468 					    PHY_CTRL_NOND0A_LPLU);
   15469 
   15470 					/* Set Auto Enable LPI after link up */
   15471 					sc->phy.readreg_locked(dev, 2,
   15472 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15473 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15474 					sc->phy.writereg_locked(dev, 2,
   15475 					    I217_LPI_GPIO_CTRL, phy_reg);
   15476 				}
   15477 			}
   15478 		}
   15479 
   15480 		/*
   15481 		 * For i217 Intel Rapid Start Technology support,
   15482 		 * when the system is going into Sx and no manageability engine
   15483 		 * is present, the driver must configure proxy to reset only on
   15484 		 * power good.	LPI (Low Power Idle) state must also reset only
   15485 		 * on power good, as well as the MTA (Multicast table array).
   15486 		 * The SMBus release must also be disabled on LCD reset.
   15487 		 */
   15488 
   15489 		/*
   15490 		 * Enable MTA to reset for Intel Rapid Start Technology
   15491 		 * Support
   15492 		 */
   15493 
   15494 release:
   15495 		sc->phy.release(sc);
   15496 	}
   15497 out:
   15498 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15499 
   15500 	if (sc->sc_type == WM_T_ICH8)
   15501 		wm_gig_downshift_workaround_ich8lan(sc);
   15502 
   15503 	if (sc->sc_type >= WM_T_PCH) {
   15504 		wm_oem_bits_config_ich8lan(sc, false);
   15505 
   15506 		/* Reset PHY to activate OEM bits on 82577/8 */
   15507 		if (sc->sc_type == WM_T_PCH)
   15508 			wm_reset_phy(sc);
   15509 
   15510 		if (sc->phy.acquire(sc) != 0)
   15511 			return;
   15512 		wm_write_smbus_addr(sc);
   15513 		sc->phy.release(sc);
   15514 	}
   15515 }
   15516 
   15517 /*
   15518  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15519  *  @sc: pointer to the HW structure
   15520  *
   15521  *  During Sx to S0 transitions on non-managed devices or managed devices
   15522  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15523  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15524  *  the PHY.
   15525  *  On i217, setup Intel Rapid Start Technology.
   15526  */
   15527 static int
   15528 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15529 {
   15530 	device_t dev = sc->sc_dev;
   15531 	int rv;
   15532 
   15533 	if (sc->sc_type < WM_T_PCH2)
   15534 		return 0;
   15535 
   15536 	rv = wm_init_phy_workarounds_pchlan(sc);
   15537 	if (rv != 0)
   15538 		return -1;
   15539 
   15540 	/* For i217 Intel Rapid Start Technology support when the system
   15541 	 * is transitioning from Sx and no manageability engine is present
   15542 	 * configure SMBus to restore on reset, disable proxy, and enable
   15543 	 * the reset on MTA (Multicast table array).
   15544 	 */
   15545 	if (sc->sc_phytype == WMPHY_I217) {
   15546 		uint16_t phy_reg;
   15547 
   15548 		if (sc->phy.acquire(sc) != 0)
   15549 			return -1;
   15550 
   15551 		/* Clear Auto Enable LPI after link up */
   15552 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15553 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15554 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15555 
   15556 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15557 			/* Restore clear on SMB if no manageability engine
   15558 			 * is present
   15559 			 */
   15560 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15561 			    &phy_reg);
   15562 			if (rv != 0)
   15563 				goto release;
   15564 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15565 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15566 
   15567 			/* Disable Proxy */
   15568 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15569 		}
   15570 		/* Enable reset on MTA */
   15571 		sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15572 		if (rv != 0)
   15573 			goto release;
   15574 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15575 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15576 
   15577 release:
   15578 		sc->phy.release(sc);
   15579 		return rv;
   15580 	}
   15581 
   15582 	return 0;
   15583 }
   15584 
   15585 static void
   15586 wm_enable_wakeup(struct wm_softc *sc)
   15587 {
   15588 	uint32_t reg, pmreg;
   15589 	pcireg_t pmode;
   15590 	int rv = 0;
   15591 
   15592 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15593 		device_xname(sc->sc_dev), __func__));
   15594 
   15595 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15596 	    &pmreg, NULL) == 0)
   15597 		return;
   15598 
   15599 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15600 		goto pme;
   15601 
   15602 	/* Advertise the wakeup capability */
   15603 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15604 	    | CTRL_SWDPIN(3));
   15605 
   15606 	/* Keep the laser running on fiber adapters */
   15607 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15608 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15609 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15610 		reg |= CTRL_EXT_SWDPIN(3);
   15611 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15612 	}
   15613 
   15614 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15615 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15616 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15617 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15618 		wm_suspend_workarounds_ich8lan(sc);
   15619 
   15620 #if 0	/* For the multicast packet */
   15621 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15622 	reg |= WUFC_MC;
   15623 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15624 #endif
   15625 
   15626 	if (sc->sc_type >= WM_T_PCH) {
   15627 		rv = wm_enable_phy_wakeup(sc);
   15628 		if (rv != 0)
   15629 			goto pme;
   15630 	} else {
   15631 		/* Enable wakeup by the MAC */
   15632 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15633 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15634 	}
   15635 
   15636 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15637 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15638 		|| (sc->sc_type == WM_T_PCH2))
   15639 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15640 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15641 
   15642 pme:
   15643 	/* Request PME */
   15644 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15645 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15646 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15647 		/* For WOL */
   15648 		pmode |= PCI_PMCSR_PME_EN;
   15649 	} else {
   15650 		/* Disable WOL */
   15651 		pmode &= ~PCI_PMCSR_PME_EN;
   15652 	}
   15653 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15654 }
   15655 
   15656 /* Disable ASPM L0s and/or L1 for workaround */
   15657 static void
   15658 wm_disable_aspm(struct wm_softc *sc)
   15659 {
   15660 	pcireg_t reg, mask = 0;
   15661 	unsigned const char *str = "";
   15662 
   15663 	/*
   15664 	 *  Only for PCIe device which has PCIe capability in the PCI config
   15665 	 * space.
   15666 	 */
   15667 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15668 		return;
   15669 
   15670 	switch (sc->sc_type) {
   15671 	case WM_T_82571:
   15672 	case WM_T_82572:
   15673 		/*
   15674 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15675 		 * State Power management L1 State (ASPM L1).
   15676 		 */
   15677 		mask = PCIE_LCSR_ASPM_L1;
   15678 		str = "L1 is";
   15679 		break;
   15680 	case WM_T_82573:
   15681 	case WM_T_82574:
   15682 	case WM_T_82583:
   15683 		/*
   15684 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15685 		 *
   15686 		 * The 82574 and 82583 does not support PCIe ASPM L0s with
   15687 		 * some chipset.  The document of 82574 and 82583 says that
   15688 		 * disabling L0s with some specific chipset is sufficient,
   15689 		 * but we follow as of the Intel em driver does.
   15690 		 *
   15691 		 * References:
   15692 		 * Errata 8 of the Specification Update of i82573.
   15693 		 * Errata 20 of the Specification Update of i82574.
   15694 		 * Errata 9 of the Specification Update of i82583.
   15695 		 */
   15696 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15697 		str = "L0s and L1 are";
   15698 		break;
   15699 	default:
   15700 		return;
   15701 	}
   15702 
   15703 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15704 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15705 	reg &= ~mask;
   15706 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15707 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15708 
   15709 	/* Print only in wm_attach() */
   15710 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15711 		aprint_verbose_dev(sc->sc_dev,
   15712 		    "ASPM %s disabled to workaround the errata.\n", str);
   15713 }
   15714 
   15715 /* LPLU */
   15716 
   15717 static void
   15718 wm_lplu_d0_disable(struct wm_softc *sc)
   15719 {
   15720 	struct mii_data *mii = &sc->sc_mii;
   15721 	uint32_t reg;
   15722 	uint16_t phyval;
   15723 
   15724 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15725 		device_xname(sc->sc_dev), __func__));
   15726 
   15727 	if (sc->sc_phytype == WMPHY_IFE)
   15728 		return;
   15729 
   15730 	switch (sc->sc_type) {
   15731 	case WM_T_82571:
   15732 	case WM_T_82572:
   15733 	case WM_T_82573:
   15734 	case WM_T_82575:
   15735 	case WM_T_82576:
   15736 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15737 		phyval &= ~PMR_D0_LPLU;
   15738 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15739 		break;
   15740 	case WM_T_82580:
   15741 	case WM_T_I350:
   15742 	case WM_T_I210:
   15743 	case WM_T_I211:
   15744 		reg = CSR_READ(sc, WMREG_PHPM);
   15745 		reg &= ~PHPM_D0A_LPLU;
   15746 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15747 		break;
   15748 	case WM_T_82574:
   15749 	case WM_T_82583:
   15750 	case WM_T_ICH8:
   15751 	case WM_T_ICH9:
   15752 	case WM_T_ICH10:
   15753 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15754 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15755 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15756 		CSR_WRITE_FLUSH(sc);
   15757 		break;
   15758 	case WM_T_PCH:
   15759 	case WM_T_PCH2:
   15760 	case WM_T_PCH_LPT:
   15761 	case WM_T_PCH_SPT:
   15762 	case WM_T_PCH_CNP:
   15763 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15764 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15765 		if (wm_phy_resetisblocked(sc) == false)
   15766 			phyval |= HV_OEM_BITS_ANEGNOW;
   15767 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15768 		break;
   15769 	default:
   15770 		break;
   15771 	}
   15772 }
   15773 
   15774 /* EEE */
   15775 
   15776 static int
   15777 wm_set_eee_i350(struct wm_softc *sc)
   15778 {
   15779 	struct ethercom *ec = &sc->sc_ethercom;
   15780 	uint32_t ipcnfg, eeer;
   15781 	uint32_t ipcnfg_mask
   15782 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15783 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15784 
   15785 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15786 
   15787 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15788 	eeer = CSR_READ(sc, WMREG_EEER);
   15789 
   15790 	/* Enable or disable per user setting */
   15791 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15792 		ipcnfg |= ipcnfg_mask;
   15793 		eeer |= eeer_mask;
   15794 	} else {
   15795 		ipcnfg &= ~ipcnfg_mask;
   15796 		eeer &= ~eeer_mask;
   15797 	}
   15798 
   15799 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15800 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15801 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15802 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15803 
   15804 	return 0;
   15805 }
   15806 
   15807 static int
   15808 wm_set_eee_pchlan(struct wm_softc *sc)
   15809 {
   15810 	device_t dev = sc->sc_dev;
   15811 	struct ethercom *ec = &sc->sc_ethercom;
   15812 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15813 	int rv = 0;
   15814 
   15815 	switch (sc->sc_phytype) {
   15816 	case WMPHY_82579:
   15817 		lpa = I82579_EEE_LP_ABILITY;
   15818 		pcs_status = I82579_EEE_PCS_STATUS;
   15819 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15820 		break;
   15821 	case WMPHY_I217:
   15822 		lpa = I217_EEE_LP_ABILITY;
   15823 		pcs_status = I217_EEE_PCS_STATUS;
   15824 		adv_addr = I217_EEE_ADVERTISEMENT;
   15825 		break;
   15826 	default:
   15827 		return 0;
   15828 	}
   15829 
   15830 	if (sc->phy.acquire(sc)) {
   15831 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15832 		return 0;
   15833 	}
   15834 
   15835 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15836 	if (rv != 0)
   15837 		goto release;
   15838 
   15839 	/* Clear bits that enable EEE in various speeds */
   15840 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15841 
   15842 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15843 		/* Save off link partner's EEE ability */
   15844 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15845 		if (rv != 0)
   15846 			goto release;
   15847 
   15848 		/* Read EEE advertisement */
   15849 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15850 			goto release;
   15851 
   15852 		/*
   15853 		 * Enable EEE only for speeds in which the link partner is
   15854 		 * EEE capable and for which we advertise EEE.
   15855 		 */
   15856 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15857 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15858 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15859 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15860 			if ((data & ANLPAR_TX_FD) != 0)
   15861 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15862 			else {
   15863 				/*
   15864 				 * EEE is not supported in 100Half, so ignore
   15865 				 * partner's EEE in 100 ability if full-duplex
   15866 				 * is not advertised.
   15867 				 */
   15868 				sc->eee_lp_ability
   15869 				    &= ~AN_EEEADVERT_100_TX;
   15870 			}
   15871 		}
   15872 	}
   15873 
   15874 	if (sc->sc_phytype == WMPHY_82579) {
   15875 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15876 		if (rv != 0)
   15877 			goto release;
   15878 
   15879 		data &= ~I82579_LPI_PLL_SHUT_100;
   15880 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15881 	}
   15882 
   15883 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15884 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15885 		goto release;
   15886 
   15887 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15888 release:
   15889 	sc->phy.release(sc);
   15890 
   15891 	return rv;
   15892 }
   15893 
   15894 static int
   15895 wm_set_eee(struct wm_softc *sc)
   15896 {
   15897 	struct ethercom *ec = &sc->sc_ethercom;
   15898 
   15899 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15900 		return 0;
   15901 
   15902 	if (sc->sc_type == WM_T_I354) {
   15903 		/* I354 uses an external PHY */
   15904 		return 0; /* not yet */
   15905 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15906 		return wm_set_eee_i350(sc);
   15907 	else if (sc->sc_type >= WM_T_PCH2)
   15908 		return wm_set_eee_pchlan(sc);
   15909 
   15910 	return 0;
   15911 }
   15912 
   15913 /*
   15914  * Workarounds (mainly PHY related).
   15915  * Basically, PHY's workarounds are in the PHY drivers.
   15916  */
   15917 
   15918 /* Work-around for 82566 Kumeran PCS lock loss */
   15919 static int
   15920 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15921 {
   15922 	struct mii_data *mii = &sc->sc_mii;
   15923 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15924 	int i, reg, rv;
   15925 	uint16_t phyreg;
   15926 
   15927 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15928 		device_xname(sc->sc_dev), __func__));
   15929 
   15930 	/* If the link is not up, do nothing */
   15931 	if ((status & STATUS_LU) == 0)
   15932 		return 0;
   15933 
   15934 	/* Nothing to do if the link is other than 1Gbps */
   15935 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15936 		return 0;
   15937 
   15938 	for (i = 0; i < 10; i++) {
   15939 		/* read twice */
   15940 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15941 		if (rv != 0)
   15942 			return rv;
   15943 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15944 		if (rv != 0)
   15945 			return rv;
   15946 
   15947 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15948 			goto out;	/* GOOD! */
   15949 
   15950 		/* Reset the PHY */
   15951 		wm_reset_phy(sc);
   15952 		delay(5*1000);
   15953 	}
   15954 
   15955 	/* Disable GigE link negotiation */
   15956 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15957 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15958 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15959 
   15960 	/*
   15961 	 * Call gig speed drop workaround on Gig disable before accessing
   15962 	 * any PHY registers.
   15963 	 */
   15964 	wm_gig_downshift_workaround_ich8lan(sc);
   15965 
   15966 out:
   15967 	return 0;
   15968 }
   15969 
   15970 /*
   15971  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15972  *  @sc: pointer to the HW structure
   15973  *
   15974  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15975  *  LPLU, Gig disable, MDIC PHY reset):
   15976  *    1) Set Kumeran Near-end loopback
   15977  *    2) Clear Kumeran Near-end loopback
   15978  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15979  */
   15980 static void
   15981 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15982 {
   15983 	uint16_t kmreg;
   15984 
   15985 	/* Only for igp3 */
   15986 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15987 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15988 			return;
   15989 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15990 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15991 			return;
   15992 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15993 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15994 	}
   15995 }
   15996 
   15997 /*
   15998  * Workaround for pch's PHYs
   15999  * XXX should be moved to new PHY driver?
   16000  */
   16001 static int
   16002 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16003 {
   16004 	device_t dev = sc->sc_dev;
   16005 	struct mii_data *mii = &sc->sc_mii;
   16006 	struct mii_softc *child;
   16007 	uint16_t phy_data, phyrev = 0;
   16008 	int phytype = sc->sc_phytype;
   16009 	int rv;
   16010 
   16011 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16012 		device_xname(dev), __func__));
   16013 	KASSERT(sc->sc_type == WM_T_PCH);
   16014 
   16015 	/* Set MDIO slow mode before any other MDIO access */
   16016 	if (phytype == WMPHY_82577)
   16017 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16018 			return rv;
   16019 
   16020 	child = LIST_FIRST(&mii->mii_phys);
   16021 	if (child != NULL)
   16022 		phyrev = child->mii_mpd_rev;
   16023 
   16024 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   16025 	if ((child != NULL) &&
   16026 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16027 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16028 		/* Disable generation of early preamble (0x4431) */
   16029 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16030 		    &phy_data);
   16031 		if (rv != 0)
   16032 			return rv;
   16033 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16034 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16035 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16036 		    phy_data);
   16037 		if (rv != 0)
   16038 			return rv;
   16039 
   16040 		/* Preamble tuning for SSC */
   16041 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16042 		if (rv != 0)
   16043 			return rv;
   16044 	}
   16045 
   16046 	/* 82578 */
   16047 	if (phytype == WMPHY_82578) {
   16048 		/*
   16049 		 * Return registers to default by doing a soft reset then
   16050 		 * writing 0x3140 to the control register
   16051 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16052 		 */
   16053 		if ((child != NULL) && (phyrev < 2)) {
   16054 			PHY_RESET(child);
   16055 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16056 			if (rv != 0)
   16057 				return rv;
   16058 		}
   16059 	}
   16060 
   16061 	/* Select page 0 */
   16062 	if ((rv = sc->phy.acquire(sc)) != 0)
   16063 		return rv;
   16064 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16065 	sc->phy.release(sc);
   16066 	if (rv != 0)
   16067 		return rv;
   16068 
   16069 	/*
   16070 	 * Configure the K1 Si workaround during phy reset assuming there is
   16071 	 * link so that it disables K1 if link is in 1Gbps.
   16072 	 */
   16073 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16074 		return rv;
   16075 
   16076 	/* Workaround for link disconnects on a busy hub in half duplex */
   16077 	rv = sc->phy.acquire(sc);
   16078 	if (rv)
   16079 		return rv;
   16080 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16081 	if (rv)
   16082 		goto release;
   16083 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16084 	    phy_data & 0x00ff);
   16085 	if (rv)
   16086 		goto release;
   16087 
   16088 	/* Set MSE higher to enable link to stay up when noise is high */
   16089 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16090 release:
   16091 	sc->phy.release(sc);
   16092 
   16093 	return rv;
   16094 }
   16095 
   16096 /*
   16097  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16098  *  @sc:   pointer to the HW structure
   16099  */
   16100 static void
   16101 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16102 {
   16103 
   16104 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16105 		device_xname(sc->sc_dev), __func__));
   16106 
   16107 	if (sc->phy.acquire(sc) != 0)
   16108 		return;
   16109 
   16110 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16111 
   16112 	sc->phy.release(sc);
   16113 }
   16114 
   16115 static void
   16116 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16117 {
   16118 	device_t dev = sc->sc_dev;
   16119 	uint32_t mac_reg;
   16120 	uint16_t i, wuce;
   16121 	int count;
   16122 
   16123 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16124 		device_xname(dev), __func__));
   16125 
   16126 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16127 		return;
   16128 
   16129 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16130 	count = wm_rar_count(sc);
   16131 	for (i = 0; i < count; i++) {
   16132 		uint16_t lo, hi;
   16133 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16134 		lo = (uint16_t)(mac_reg & 0xffff);
   16135 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16136 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16137 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16138 
   16139 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16140 		lo = (uint16_t)(mac_reg & 0xffff);
   16141 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16142 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16143 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16144 	}
   16145 
   16146 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16147 }
   16148 
   16149 /*
   16150  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16151  *  with 82579 PHY
   16152  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16153  */
   16154 static int
   16155 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16156 {
   16157 	device_t dev = sc->sc_dev;
   16158 	int rar_count;
   16159 	int rv;
   16160 	uint32_t mac_reg;
   16161 	uint16_t dft_ctrl, data;
   16162 	uint16_t i;
   16163 
   16164 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16165 		device_xname(dev), __func__));
   16166 
   16167 	if (sc->sc_type < WM_T_PCH2)
   16168 		return 0;
   16169 
   16170 	/* Acquire PHY semaphore */
   16171 	rv = sc->phy.acquire(sc);
   16172 	if (rv != 0)
   16173 		return rv;
   16174 
   16175 	/* Disable Rx path while enabling/disabling workaround */
   16176 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16177 	if (rv != 0)
   16178 		goto out;
   16179 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16180 	    dft_ctrl | (1 << 14));
   16181 	if (rv != 0)
   16182 		goto out;
   16183 
   16184 	if (enable) {
   16185 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16186 		 * SHRAL/H) and initial CRC values to the MAC
   16187 		 */
   16188 		rar_count = wm_rar_count(sc);
   16189 		for (i = 0; i < rar_count; i++) {
   16190 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16191 			uint32_t addr_high, addr_low;
   16192 
   16193 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16194 			if (!(addr_high & RAL_AV))
   16195 				continue;
   16196 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16197 			mac_addr[0] = (addr_low & 0xFF);
   16198 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16199 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16200 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16201 			mac_addr[4] = (addr_high & 0xFF);
   16202 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16203 
   16204 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16205 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16206 		}
   16207 
   16208 		/* Write Rx addresses to the PHY */
   16209 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16210 	}
   16211 
   16212 	/*
   16213 	 * If enable ==
   16214 	 *	true: Enable jumbo frame workaround in the MAC.
   16215 	 *	false: Write MAC register values back to h/w defaults.
   16216 	 */
   16217 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16218 	if (enable) {
   16219 		mac_reg &= ~(1 << 14);
   16220 		mac_reg |= (7 << 15);
   16221 	} else
   16222 		mac_reg &= ~(0xf << 14);
   16223 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16224 
   16225 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16226 	if (enable) {
   16227 		mac_reg |= RCTL_SECRC;
   16228 		sc->sc_rctl |= RCTL_SECRC;
   16229 		sc->sc_flags |= WM_F_CRC_STRIP;
   16230 	} else {
   16231 		mac_reg &= ~RCTL_SECRC;
   16232 		sc->sc_rctl &= ~RCTL_SECRC;
   16233 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16234 	}
   16235 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16236 
   16237 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16238 	if (rv != 0)
   16239 		goto out;
   16240 	if (enable)
   16241 		data |= 1 << 0;
   16242 	else
   16243 		data &= ~(1 << 0);
   16244 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16245 	if (rv != 0)
   16246 		goto out;
   16247 
   16248 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16249 	if (rv != 0)
   16250 		goto out;
   16251 	/*
   16252 	 * XXX FreeBSD and Linux do the same thing that they set the same value
   16253 	 * on both the enable case and the disable case. Is it correct?
   16254 	 */
   16255 	data &= ~(0xf << 8);
   16256 	data |= (0xb << 8);
   16257 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16258 	if (rv != 0)
   16259 		goto out;
   16260 
   16261 	/*
   16262 	 * If enable ==
   16263 	 *	true: Enable jumbo frame workaround in the PHY.
   16264 	 *	false: Write PHY register values back to h/w defaults.
   16265 	 */
   16266 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16267 	if (rv != 0)
   16268 		goto out;
   16269 	data &= ~(0x7F << 5);
   16270 	if (enable)
   16271 		data |= (0x37 << 5);
   16272 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16273 	if (rv != 0)
   16274 		goto out;
   16275 
   16276 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16277 	if (rv != 0)
   16278 		goto out;
   16279 	if (enable)
   16280 		data &= ~(1 << 13);
   16281 	else
   16282 		data |= (1 << 13);
   16283 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16284 	if (rv != 0)
   16285 		goto out;
   16286 
   16287 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16288 	if (rv != 0)
   16289 		goto out;
   16290 	data &= ~(0x3FF << 2);
   16291 	if (enable)
   16292 		data |= (I82579_TX_PTR_GAP << 2);
   16293 	else
   16294 		data |= (0x8 << 2);
   16295 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16296 	if (rv != 0)
   16297 		goto out;
   16298 
   16299 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16300 	    enable ? 0xf100 : 0x7e00);
   16301 	if (rv != 0)
   16302 		goto out;
   16303 
   16304 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16305 	if (rv != 0)
   16306 		goto out;
   16307 	if (enable)
   16308 		data |= 1 << 10;
   16309 	else
   16310 		data &= ~(1 << 10);
   16311 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16312 	if (rv != 0)
   16313 		goto out;
   16314 
   16315 	/* Re-enable Rx path after enabling/disabling workaround */
   16316 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16317 	    dft_ctrl & ~(1 << 14));
   16318 
   16319 out:
   16320 	sc->phy.release(sc);
   16321 
   16322 	return rv;
   16323 }
   16324 
   16325 /*
   16326  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16327  *  done after every PHY reset.
   16328  */
   16329 static int
   16330 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16331 {
   16332 	device_t dev = sc->sc_dev;
   16333 	int rv;
   16334 
   16335 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16336 		device_xname(dev), __func__));
   16337 	KASSERT(sc->sc_type == WM_T_PCH2);
   16338 
   16339 	/* Set MDIO slow mode before any other MDIO access */
   16340 	rv = wm_set_mdio_slow_mode_hv(sc);
   16341 	if (rv != 0)
   16342 		return rv;
   16343 
   16344 	rv = sc->phy.acquire(sc);
   16345 	if (rv != 0)
   16346 		return rv;
   16347 	/* Set MSE higher to enable link to stay up when noise is high */
   16348 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16349 	if (rv != 0)
   16350 		goto release;
   16351 	/* Drop link after 5 times MSE threshold was reached */
   16352 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16353 release:
   16354 	sc->phy.release(sc);
   16355 
   16356 	return rv;
   16357 }
   16358 
   16359 /**
   16360  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16361  *  @link: link up bool flag
   16362  *
   16363  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   16364  *  preventing further DMA write requests.  Workaround the issue by disabling
   16365  *  the de-assertion of the clock request when in 1Gpbs mode.
   16366  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16367  *  speeds in order to avoid Tx hangs.
   16368  **/
   16369 static int
   16370 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16371 {
   16372 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16373 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16374 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16375 	uint16_t phyreg;
   16376 
   16377 	if (link && (speed == STATUS_SPEED_1000)) {
   16378 		sc->phy.acquire(sc);
   16379 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16380 		    &phyreg);
   16381 		if (rv != 0)
   16382 			goto release;
   16383 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16384 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16385 		if (rv != 0)
   16386 			goto release;
   16387 		delay(20);
   16388 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16389 
   16390 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16391 		    &phyreg);
   16392 release:
   16393 		sc->phy.release(sc);
   16394 		return rv;
   16395 	}
   16396 
   16397 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16398 
   16399 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16400 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16401 	    || !link
   16402 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16403 		goto update_fextnvm6;
   16404 
   16405 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16406 
   16407 	/* Clear link status transmit timeout */
   16408 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16409 	if (speed == STATUS_SPEED_100) {
   16410 		/* Set inband Tx timeout to 5x10us for 100Half */
   16411 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16412 
   16413 		/* Do not extend the K1 entry latency for 100Half */
   16414 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16415 	} else {
   16416 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16417 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16418 
   16419 		/* Extend the K1 entry latency for 10 Mbps */
   16420 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16421 	}
   16422 
   16423 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16424 
   16425 update_fextnvm6:
   16426 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16427 	return 0;
   16428 }
   16429 
   16430 /*
   16431  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16432  *  @sc:   pointer to the HW structure
   16433  *  @link: link up bool flag
   16434  *
   16435  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   16436  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   16437  *  If link is down, the function will restore the default K1 setting located
   16438  *  in the NVM.
   16439  */
   16440 static int
   16441 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16442 {
   16443 	int k1_enable = sc->sc_nvm_k1_enabled;
   16444 
   16445 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16446 		device_xname(sc->sc_dev), __func__));
   16447 
   16448 	if (sc->phy.acquire(sc) != 0)
   16449 		return -1;
   16450 
   16451 	if (link) {
   16452 		k1_enable = 0;
   16453 
   16454 		/* Link stall fix for link up */
   16455 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16456 		    0x0100);
   16457 	} else {
   16458 		/* Link stall fix for link down */
   16459 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16460 		    0x4100);
   16461 	}
   16462 
   16463 	wm_configure_k1_ich8lan(sc, k1_enable);
   16464 	sc->phy.release(sc);
   16465 
   16466 	return 0;
   16467 }
   16468 
   16469 /*
   16470  *  wm_k1_workaround_lv - K1 Si workaround
   16471  *  @sc:   pointer to the HW structure
   16472  *
   16473  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   16474  *  Disable K1 for 1000 and 100 speeds
   16475  */
   16476 static int
   16477 wm_k1_workaround_lv(struct wm_softc *sc)
   16478 {
   16479 	uint32_t reg;
   16480 	uint16_t phyreg;
   16481 	int rv;
   16482 
   16483 	if (sc->sc_type != WM_T_PCH2)
   16484 		return 0;
   16485 
   16486 	/* Set K1 beacon duration based on 10Mbps speed */
   16487 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16488 	if (rv != 0)
   16489 		return rv;
   16490 
   16491 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16492 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16493 		if (phyreg &
   16494 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   16495 			/* LV 1G/100 Packet drop issue wa  */
   16496 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16497 			    &phyreg);
   16498 			if (rv != 0)
   16499 				return rv;
   16500 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16501 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16502 			    phyreg);
   16503 			if (rv != 0)
   16504 				return rv;
   16505 		} else {
   16506 			/* For 10Mbps */
   16507 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16508 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16509 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16510 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16511 		}
   16512 	}
   16513 
   16514 	return 0;
   16515 }
   16516 
   16517 /*
   16518  *  wm_link_stall_workaround_hv - Si workaround
   16519  *  @sc: pointer to the HW structure
   16520  *
   16521  *  This function works around a Si bug where the link partner can get
   16522  *  a link up indication before the PHY does. If small packets are sent
   16523  *  by the link partner they can be placed in the packet buffer without
   16524  *  being properly accounted for by the PHY and will stall preventing
   16525  *  further packets from being received.  The workaround is to clear the
   16526  *  packet buffer after the PHY detects link up.
   16527  */
   16528 static int
   16529 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16530 {
   16531 	uint16_t phyreg;
   16532 
   16533 	if (sc->sc_phytype != WMPHY_82578)
   16534 		return 0;
   16535 
   16536 	/* Do not apply workaround if in PHY loopback bit 14 set */
   16537 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16538 	if ((phyreg & BMCR_LOOP) != 0)
   16539 		return 0;
   16540 
   16541 	/* Check if link is up and at 1Gbps */
   16542 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16543 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16544 	    | BM_CS_STATUS_SPEED_MASK;
   16545 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16546 		| BM_CS_STATUS_SPEED_1000))
   16547 		return 0;
   16548 
   16549 	delay(200 * 1000);	/* XXX too big */
   16550 
   16551 	/* Flush the packets in the fifo buffer */
   16552 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16553 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16554 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16555 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16556 
   16557 	return 0;
   16558 }
   16559 
   16560 static int
   16561 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16562 {
   16563 	int rv;
   16564 	uint16_t reg;
   16565 
   16566 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16567 	if (rv != 0)
   16568 		return rv;
   16569 
   16570 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16571 	    reg | HV_KMRN_MDIO_SLOW);
   16572 }
   16573 
   16574 /*
   16575  *  wm_configure_k1_ich8lan - Configure K1 power state
   16576  *  @sc: pointer to the HW structure
   16577  *  @enable: K1 state to configure
   16578  *
   16579  *  Configure the K1 power state based on the provided parameter.
   16580  *  Assumes semaphore already acquired.
   16581  */
   16582 static void
   16583 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16584 {
   16585 	uint32_t ctrl, ctrl_ext, tmp;
   16586 	uint16_t kmreg;
   16587 	int rv;
   16588 
   16589 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16590 
   16591 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16592 	if (rv != 0)
   16593 		return;
   16594 
   16595 	if (k1_enable)
   16596 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16597 	else
   16598 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16599 
   16600 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16601 	if (rv != 0)
   16602 		return;
   16603 
   16604 	delay(20);
   16605 
   16606 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16607 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16608 
   16609 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16610 	tmp |= CTRL_FRCSPD;
   16611 
   16612 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16613 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16614 	CSR_WRITE_FLUSH(sc);
   16615 	delay(20);
   16616 
   16617 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16618 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16619 	CSR_WRITE_FLUSH(sc);
   16620 	delay(20);
   16621 
   16622 	return;
   16623 }
   16624 
   16625 /* special case - for 82575 - need to do manual init ... */
   16626 static void
   16627 wm_reset_init_script_82575(struct wm_softc *sc)
   16628 {
   16629 	/*
   16630 	 * Remark: this is untested code - we have no board without EEPROM
   16631 	 *  same setup as mentioned int the FreeBSD driver for the i82575
   16632 	 */
   16633 
   16634 	/* SerDes configuration via SERDESCTRL */
   16635 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16636 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16637 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16638 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16639 
   16640 	/* CCM configuration via CCMCTL register */
   16641 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16642 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16643 
   16644 	/* PCIe lanes configuration */
   16645 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16646 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16647 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16648 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16649 
   16650 	/* PCIe PLL Configuration */
   16651 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16652 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16653 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16654 }
   16655 
   16656 static void
   16657 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16658 {
   16659 	uint32_t reg;
   16660 	uint16_t nvmword;
   16661 	int rv;
   16662 
   16663 	if (sc->sc_type != WM_T_82580)
   16664 		return;
   16665 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16666 		return;
   16667 
   16668 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16669 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16670 	if (rv != 0) {
   16671 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16672 		    __func__);
   16673 		return;
   16674 	}
   16675 
   16676 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16677 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16678 		reg |= MDICNFG_DEST;
   16679 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16680 		reg |= MDICNFG_COM_MDIO;
   16681 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16682 }
   16683 
   16684 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16685 
   16686 static bool
   16687 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16688 {
   16689 	uint32_t reg;
   16690 	uint16_t id1, id2;
   16691 	int i, rv;
   16692 
   16693 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16694 		device_xname(sc->sc_dev), __func__));
   16695 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16696 
   16697 	id1 = id2 = 0xffff;
   16698 	for (i = 0; i < 2; i++) {
   16699 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16700 		    &id1);
   16701 		if ((rv != 0) || MII_INVALIDID(id1))
   16702 			continue;
   16703 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16704 		    &id2);
   16705 		if ((rv != 0) || MII_INVALIDID(id2))
   16706 			continue;
   16707 		break;
   16708 	}
   16709 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16710 		goto out;
   16711 
   16712 	/*
   16713 	 * In case the PHY needs to be in mdio slow mode,
   16714 	 * set slow mode and try to get the PHY id again.
   16715 	 */
   16716 	rv = 0;
   16717 	if (sc->sc_type < WM_T_PCH_LPT) {
   16718 		sc->phy.release(sc);
   16719 		wm_set_mdio_slow_mode_hv(sc);
   16720 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16721 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16722 		sc->phy.acquire(sc);
   16723 	}
   16724 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16725 		device_printf(sc->sc_dev, "XXX return with false\n");
   16726 		return false;
   16727 	}
   16728 out:
   16729 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16730 		/* Only unforce SMBus if ME is not active */
   16731 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16732 			uint16_t phyreg;
   16733 
   16734 			/* Unforce SMBus mode in PHY */
   16735 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16736 			    CV_SMB_CTRL, &phyreg);
   16737 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16738 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16739 			    CV_SMB_CTRL, phyreg);
   16740 
   16741 			/* Unforce SMBus mode in MAC */
   16742 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16743 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16744 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16745 		}
   16746 	}
   16747 	return true;
   16748 }
   16749 
   16750 static void
   16751 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16752 {
   16753 	uint32_t reg;
   16754 	int i;
   16755 
   16756 	/* Set PHY Config Counter to 50msec */
   16757 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16758 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16759 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16760 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16761 
   16762 	/* Toggle LANPHYPC */
   16763 	reg = CSR_READ(sc, WMREG_CTRL);
   16764 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16765 	reg &= ~CTRL_LANPHYPC_VALUE;
   16766 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16767 	CSR_WRITE_FLUSH(sc);
   16768 	delay(1000);
   16769 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16770 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16771 	CSR_WRITE_FLUSH(sc);
   16772 
   16773 	if (sc->sc_type < WM_T_PCH_LPT)
   16774 		delay(50 * 1000);
   16775 	else {
   16776 		i = 20;
   16777 
   16778 		do {
   16779 			delay(5 * 1000);
   16780 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16781 		    && i--);
   16782 
   16783 		delay(30 * 1000);
   16784 	}
   16785 }
   16786 
   16787 static int
   16788 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16789 {
   16790 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16791 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16792 	uint32_t rxa;
   16793 	uint16_t scale = 0, lat_enc = 0;
   16794 	int32_t obff_hwm = 0;
   16795 	int64_t lat_ns, value;
   16796 
   16797 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16798 		device_xname(sc->sc_dev), __func__));
   16799 
   16800 	if (link) {
   16801 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16802 		uint32_t status;
   16803 		uint16_t speed;
   16804 		pcireg_t preg;
   16805 
   16806 		status = CSR_READ(sc, WMREG_STATUS);
   16807 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16808 		case STATUS_SPEED_10:
   16809 			speed = 10;
   16810 			break;
   16811 		case STATUS_SPEED_100:
   16812 			speed = 100;
   16813 			break;
   16814 		case STATUS_SPEED_1000:
   16815 			speed = 1000;
   16816 			break;
   16817 		default:
   16818 			device_printf(sc->sc_dev, "Unknown speed "
   16819 			    "(status = %08x)\n", status);
   16820 			return -1;
   16821 		}
   16822 
   16823 		/* Rx Packet Buffer Allocation size (KB) */
   16824 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16825 
   16826 		/*
   16827 		 * Determine the maximum latency tolerated by the device.
   16828 		 *
   16829 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16830 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16831 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16832 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16833 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16834 		 */
   16835 		lat_ns = ((int64_t)rxa * 1024 -
   16836 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16837 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16838 		if (lat_ns < 0)
   16839 			lat_ns = 0;
   16840 		else
   16841 			lat_ns /= speed;
   16842 		value = lat_ns;
   16843 
   16844 		while (value > LTRV_VALUE) {
   16845 			scale ++;
   16846 			value = howmany(value, __BIT(5));
   16847 		}
   16848 		if (scale > LTRV_SCALE_MAX) {
   16849 			device_printf(sc->sc_dev,
   16850 			    "Invalid LTR latency scale %d\n", scale);
   16851 			return -1;
   16852 		}
   16853 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16854 
   16855 		/* Determine the maximum latency tolerated by the platform */
   16856 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16857 		    WM_PCI_LTR_CAP_LPT);
   16858 		max_snoop = preg & 0xffff;
   16859 		max_nosnoop = preg >> 16;
   16860 
   16861 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16862 
   16863 		if (lat_enc > max_ltr_enc) {
   16864 			lat_enc = max_ltr_enc;
   16865 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16866 			    * PCI_LTR_SCALETONS(
   16867 				    __SHIFTOUT(lat_enc,
   16868 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16869 		}
   16870 
   16871 		if (lat_ns) {
   16872 			lat_ns *= speed * 1000;
   16873 			lat_ns /= 8;
   16874 			lat_ns /= 1000000000;
   16875 			obff_hwm = (int32_t)(rxa - lat_ns);
   16876 		}
   16877 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   16878 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   16879 			    "(rxa = %d, lat_ns = %d)\n",
   16880 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16881 			return -1;
   16882 		}
   16883 	}
   16884 	/* Snoop and No-Snoop latencies the same */
   16885 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16886 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16887 
   16888 	/* Set OBFF high water mark */
   16889 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16890 	reg |= obff_hwm;
   16891 	CSR_WRITE(sc, WMREG_SVT, reg);
   16892 
   16893 	/* Enable OBFF */
   16894 	reg = CSR_READ(sc, WMREG_SVCR);
   16895 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16896 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16897 
   16898 	return 0;
   16899 }
   16900 
   16901 /*
   16902  * I210 Errata 25 and I211 Errata 10
   16903  * Slow System Clock.
   16904  *
   16905  * Note that this function is called on both FLASH and iNVM case on NetBSD.
   16906  */
   16907 static int
   16908 wm_pll_workaround_i210(struct wm_softc *sc)
   16909 {
   16910 	uint32_t mdicnfg, wuc;
   16911 	uint32_t reg;
   16912 	pcireg_t pcireg;
   16913 	uint32_t pmreg;
   16914 	uint16_t nvmword, tmp_nvmword;
   16915 	uint16_t phyval;
   16916 	bool wa_done = false;
   16917 	int i, rv = 0;
   16918 
   16919 	/* Get Power Management cap offset */
   16920 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16921 	    &pmreg, NULL) == 0)
   16922 		return -1;
   16923 
   16924 	/* Save WUC and MDICNFG registers */
   16925 	wuc = CSR_READ(sc, WMREG_WUC);
   16926 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16927 
   16928 	reg = mdicnfg & ~MDICNFG_DEST;
   16929 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16930 
   16931 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   16932 		/*
   16933 		 * The default value of the Initialization Control Word 1
   16934 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   16935 		 */
   16936 		nvmword = INVM_DEFAULT_AL;
   16937 	}
   16938 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16939 
   16940 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16941 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16942 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16943 
   16944 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16945 			rv = 0;
   16946 			break; /* OK */
   16947 		} else
   16948 			rv = -1;
   16949 
   16950 		wa_done = true;
   16951 		/* Directly reset the internal PHY */
   16952 		reg = CSR_READ(sc, WMREG_CTRL);
   16953 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16954 
   16955 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16956 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16957 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16958 
   16959 		CSR_WRITE(sc, WMREG_WUC, 0);
   16960 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16961 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16962 
   16963 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16964 		    pmreg + PCI_PMCSR);
   16965 		pcireg |= PCI_PMCSR_STATE_D3;
   16966 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16967 		    pmreg + PCI_PMCSR, pcireg);
   16968 		delay(1000);
   16969 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16970 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16971 		    pmreg + PCI_PMCSR, pcireg);
   16972 
   16973 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16974 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16975 
   16976 		/* Restore WUC register */
   16977 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16978 	}
   16979 
   16980 	/* Restore MDICNFG setting */
   16981 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16982 	if (wa_done)
   16983 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16984 	return rv;
   16985 }
   16986 
   16987 static void
   16988 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16989 {
   16990 	uint32_t reg;
   16991 
   16992 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16993 		device_xname(sc->sc_dev), __func__));
   16994 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16995 	    || (sc->sc_type == WM_T_PCH_CNP));
   16996 
   16997 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16998 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16999 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17000 
   17001 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17002 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17003 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17004 }
   17005 
   17006 /* Sysctl function */
   17007 #ifdef WM_DEBUG
   17008 static int
   17009 wm_sysctl_debug(SYSCTLFN_ARGS)
   17010 {
   17011 	struct sysctlnode node = *rnode;
   17012 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17013 	uint32_t dflags;
   17014 	int error;
   17015 
   17016 	dflags = sc->sc_debug;
   17017 	node.sysctl_data = &dflags;
   17018 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17019 
   17020 	if (error || newp == NULL)
   17021 		return error;
   17022 
   17023 	sc->sc_debug = dflags;
   17024 
   17025 	return 0;
   17026 }
   17027 #endif
   17028