/*	$NetBSD: if_wm.c,v 1.699 2021/02/17 08:15:43 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.699 2021/02/17 08:15:43 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
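
/*
 * Illustrative use of DPRINTF() (a sketch, not driver code): output is
 * gated per-softc by the sc_debug bitmask, so e.g.
 *
 *	DPRINTF(sc, WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 *
 * prints only when WM_DEBUG_LINK is set in sc->sc_debug, and compiles
 * to nothing at all when WM_DEBUG is not defined.
 */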

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts used by this device driver.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
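
/*
 * Sketch of the ring arithmetic above (illustrative only): because
 * WM_NTXDESC(txq) is a power of two, advancing an index is a mask, not
 * a modulo:
 *
 *	int next = WM_NEXTTX(txq, cur);	  // (cur + 1) & (ndesc - 1)
 *
 * e.g. with txq_ndesc == 256, WM_NEXTTX(txq, 255) wraps back to 0.
 */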

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  A full-sized jumbo packet consumes 5 Rx buffers.  We
 * allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
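
/*
 * The Rx ring uses the same power-of-two trick (informal example):
 * WM_NEXTRX(255) == 0 and WM_PREVRX(0) == 255, since both indices are
 * masked with WM_NRXDESC_MASK (0xff).
 */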

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
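
/*
 * Sketch of what the token pasting above produces (illustrative):
 *
 *	WM_Q_EVCNT_DEFINE(txq, txdw)
 *
 * declares, inside the queue structure, a name buffer
 * txq_txdw_evcnt_name[] and a counter "struct evcnt txq_ev_txdw", and
 * WM_Q_EVCNT_ATTACH() then formats the counter name as e.g. "txq00txdw"
 * before registering it with evcnt_attach_dynamic().
 */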

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;
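	/*
	 * (Sketch, not driver code) The transmit side enqueues mbufs
	 * here with pcq_put(txq->txq_interq, m) and the queue's handler
	 * later drains them with pcq_get(); pcq(9) permits this without
	 * the producers blocking on the consumer.
	 */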

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t 	txq_last_hw_cmd;
	uint8_t 	txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
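
/*
 * Illustrative call sequence through the ops vectors above (a sketch,
 * assuming an already-attached softc, not literal driver code):
 *
 *	uint16_t word;
 *	if (sc->nvm.acquire(sc) == 0) {
 *		sc->nvm.read(sc, offset, 1, &word);
 *		sc->nvm.release(sc);
 *	}
 *
 * The PHY ops are used the same way, with readreg_locked()/
 * writereg_locked() called between phy.acquire() and phy.release().
 */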

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
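
/*
 * Usage sketch (illustrative only): the macros above tolerate a NULL
 * sc_core_lock, so the same code serves MPSAFE and non-MPSAFE setups:
 *
 *	WM_CORE_LOCK(sc);
 *	KASSERT(WM_CORE_LOCKED(sc));
 *	... modify shared softc state ...
 *	WM_CORE_UNLOCK(sc);
 */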

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
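
/*
 * (Sketch) WM_RXCHAIN_LINK() appends each 2k Rx buffer through the
 * rxq_tailp indirection, so a jumbo frame spread over several
 * descriptors is collected into one mbuf chain:
 *
 *	WM_RXCHAIN_RESET(rxq);
 *	WM_RXCHAIN_LINK(rxq, m1);	// rxq_head == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	// m1->m_next == m2
 */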

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
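
/*
 * Informal note: the relaxed atomic load/store pair above avoids torn
 * reads of the 64-bit ev_count (hence the _LP64 gate on
 * WM_EVENT_COUNTERS) without paying for a locked read-modify-write;
 * each per-queue counter is only ever incremented from that queue's
 * own context, so a plain load-add-store suffices.
 */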

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
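
/*
 * Worked example of the address split above (illustrative): for a
 * descriptor ring DMA-mapped at bus address 0x123456000 and x == 0,
 * WM_CDTXADDR_HI() yields 0x1 and WM_CDTXADDR_LO() yields 0x23456000;
 * nonzero x adds WM_CDTXOFF(txq, x) before the split.  On a 32-bit
 * bus_addr_t the high word is always 0.
 */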

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
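
/*
 * Informal note: CFATTACH_DECL3_NEW() is the autoconf(9) glue. It
 * registers wm_match()/wm_attach()/wm_detach() so that a PCI device
 * matching an entry in wm_products below gets a wm softc instantiated
 * for it; DVF_DETACH_SHUTDOWN marks the driver as detachable at
 * shutdown time.
 */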

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1336 	  WM_T_ICH8,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1338 	  "Intel i82801H (M) LAN Controller",
   1339 	  WM_T_ICH8,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1341 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1342 	  WM_T_ICH8,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1344 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1345 	  WM_T_ICH8,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1347 	  "82567V-3 LAN Controller",
   1348 	  WM_T_ICH8,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1350 	  "82801I (AMT) LAN Controller",
   1351 	  WM_T_ICH9,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1353 	  "82801I 10/100 LAN Controller",
   1354 	  WM_T_ICH9,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1356 	  "82801I (G) 10/100 LAN Controller",
   1357 	  WM_T_ICH9,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1359 	  "82801I (GT) 10/100 LAN Controller",
   1360 	  WM_T_ICH9,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1362 	  "82801I (C) LAN Controller",
   1363 	  WM_T_ICH9,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1365 	  "82801I mobile LAN Controller",
   1366 	  WM_T_ICH9,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1368 	  "82801I mobile (V) LAN Controller",
   1369 	  WM_T_ICH9,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1371 	  "82801I mobile (AMT) LAN Controller",
   1372 	  WM_T_ICH9,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1374 	  "82567LM-4 LAN Controller",
   1375 	  WM_T_ICH9,		WMP_F_COPPER },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1377 	  "82567LM-2 LAN Controller",
   1378 	  WM_T_ICH10,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1380 	  "82567LF-2 LAN Controller",
   1381 	  WM_T_ICH10,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1383 	  "82567LM-3 LAN Controller",
   1384 	  WM_T_ICH10,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1386 	  "82567LF-3 LAN Controller",
   1387 	  WM_T_ICH10,		WMP_F_COPPER },
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1389 	  "82567V-2 LAN Controller",
   1390 	  WM_T_ICH10,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1392 	  "82567V-3? LAN Controller",
   1393 	  WM_T_ICH10,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1395 	  "HANKSVILLE LAN Controller",
   1396 	  WM_T_ICH10,		WMP_F_COPPER },
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1398 	  "PCH LAN (82577LM) Controller",
   1399 	  WM_T_PCH,		WMP_F_COPPER },
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1401 	  "PCH LAN (82577LC) Controller",
   1402 	  WM_T_PCH,		WMP_F_COPPER },
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1404 	  "PCH LAN (82578DM) Controller",
   1405 	  WM_T_PCH,		WMP_F_COPPER },
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1407 	  "PCH LAN (82578DC) Controller",
   1408 	  WM_T_PCH,		WMP_F_COPPER },
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1410 	  "PCH2 LAN (82579LM) Controller",
   1411 	  WM_T_PCH2,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1413 	  "PCH2 LAN (82579V) Controller",
   1414 	  WM_T_PCH2,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1416 	  "82575EB dual-1000baseT Ethernet",
   1417 	  WM_T_82575,		WMP_F_COPPER },
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1419 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1420 	  WM_T_82575,		WMP_F_SERDES },
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1422 	  "82575GB quad-1000baseT Ethernet",
   1423 	  WM_T_82575,		WMP_F_COPPER },
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1425 	  "82575GB quad-1000baseT Ethernet (PM)",
   1426 	  WM_T_82575,		WMP_F_COPPER },
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1428 	  "82576 1000BaseT Ethernet",
   1429 	  WM_T_82576,		WMP_F_COPPER },
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1431 	  "82576 1000BaseX Ethernet",
   1432 	  WM_T_82576,		WMP_F_FIBER },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1435 	  "82576 gigabit Ethernet (SERDES)",
   1436 	  WM_T_82576,		WMP_F_SERDES },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1439 	  "82576 quad-1000BaseT Ethernet",
   1440 	  WM_T_82576,		WMP_F_COPPER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1443 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1444 	  WM_T_82576,		WMP_F_COPPER },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1447 	  "82576 gigabit Ethernet",
   1448 	  WM_T_82576,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1451 	  "82576 gigabit Ethernet (SERDES)",
   1452 	  WM_T_82576,		WMP_F_SERDES },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1454 	  "82576 quad-gigabit Ethernet (SERDES)",
   1455 	  WM_T_82576,		WMP_F_SERDES },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1458 	  "82580 1000BaseT Ethernet",
   1459 	  WM_T_82580,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1461 	  "82580 1000BaseX Ethernet",
   1462 	  WM_T_82580,		WMP_F_FIBER },
   1463 
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1465 	  "82580 1000BaseT Ethernet (SERDES)",
   1466 	  WM_T_82580,		WMP_F_SERDES },
   1467 
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1469 	  "82580 gigabit Ethernet (SGMII)",
   1470 	  WM_T_82580,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1472 	  "82580 dual-1000BaseT Ethernet",
   1473 	  WM_T_82580,		WMP_F_COPPER },
   1474 
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1476 	  "82580 quad-1000BaseX Ethernet",
   1477 	  WM_T_82580,		WMP_F_FIBER },
   1478 
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1480 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1481 	  WM_T_82580,		WMP_F_COPPER },
   1482 
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1484 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1485 	  WM_T_82580,		WMP_F_SERDES },
   1486 
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1488 	  "DH89XXCC 1000BASE-KX Ethernet",
   1489 	  WM_T_82580,		WMP_F_SERDES },
   1490 
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1492 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1493 	  WM_T_82580,		WMP_F_SERDES },
   1494 
   1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1496 	  "I350 Gigabit Network Connection",
   1497 	  WM_T_I350,		WMP_F_COPPER },
   1498 
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1500 	  "I350 Gigabit Fiber Network Connection",
   1501 	  WM_T_I350,		WMP_F_FIBER },
   1502 
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1504 	  "I350 Gigabit Backplane Connection",
   1505 	  WM_T_I350,		WMP_F_SERDES },
   1506 
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1508 	  "I350 Quad Port Gigabit Ethernet",
   1509 	  WM_T_I350,		WMP_F_SERDES },
   1510 
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1512 	  "I350 Gigabit Connection",
   1513 	  WM_T_I350,		WMP_F_COPPER },
   1514 
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1516 	  "I354 Gigabit Ethernet (KX)",
   1517 	  WM_T_I354,		WMP_F_SERDES },
   1518 
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1520 	  "I354 Gigabit Ethernet (SGMII)",
   1521 	  WM_T_I354,		WMP_F_COPPER },
   1522 
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1524 	  "I354 Gigabit Ethernet (2.5G)",
   1525 	  WM_T_I354,		WMP_F_COPPER },
   1526 
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1528 	  "I210-T1 Ethernet Server Adapter",
   1529 	  WM_T_I210,		WMP_F_COPPER },
   1530 
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1532 	  "I210 Ethernet (Copper OEM)",
   1533 	  WM_T_I210,		WMP_F_COPPER },
   1534 
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1536 	  "I210 Ethernet (Copper IT)",
   1537 	  WM_T_I210,		WMP_F_COPPER },
   1538 
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1540 	  "I210 Ethernet (Copper, FLASH less)",
   1541 	  WM_T_I210,		WMP_F_COPPER },
   1542 
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1544 	  "I210 Gigabit Ethernet (Fiber)",
   1545 	  WM_T_I210,		WMP_F_FIBER },
   1546 
   1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1548 	  "I210 Gigabit Ethernet (SERDES)",
   1549 	  WM_T_I210,		WMP_F_SERDES },
   1550 
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1552 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1553 	  WM_T_I210,		WMP_F_SERDES },
   1554 
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1556 	  "I210 Gigabit Ethernet (SGMII)",
   1557 	  WM_T_I210,		WMP_F_COPPER },
   1558 
   1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1560 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1561 	  WM_T_I210,		WMP_F_COPPER },
   1562 
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1564 	  "I211 Ethernet (COPPER)",
   1565 	  WM_T_I211,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1567 	  "I217 V Ethernet Connection",
   1568 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1570 	  "I217 LM Ethernet Connection",
   1571 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1573 	  "I218 V Ethernet Connection",
   1574 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1576 	  "I218 V Ethernet Connection",
   1577 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1579 	  "I218 V Ethernet Connection",
   1580 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1581 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1582 	  "I218 LM Ethernet Connection",
   1583 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1585 	  "I218 LM Ethernet Connection",
   1586 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1588 	  "I218 LM Ethernet Connection",
   1589 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1591 	  "I219 LM Ethernet Connection",
   1592 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1593 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1594 	  "I219 LM Ethernet Connection",
   1595 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1597 	  "I219 LM Ethernet Connection",
   1598 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1600 	  "I219 LM Ethernet Connection",
   1601 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1603 	  "I219 LM Ethernet Connection",
   1604 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1606 	  "I219 LM Ethernet Connection",
   1607 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1609 	  "I219 LM Ethernet Connection",
   1610 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1612 	  "I219 LM Ethernet Connection",
   1613 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1615 	  "I219 LM Ethernet Connection",
   1616 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1618 	  "I219 LM Ethernet Connection",
   1619 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1621 	  "I219 LM Ethernet Connection",
   1622 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1624 	  "I219 LM Ethernet Connection",
   1625 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1627 	  "I219 LM Ethernet Connection",
   1628 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1630 	  "I219 LM Ethernet Connection",
   1631 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1633 	  "I219 LM Ethernet Connection",
   1634 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1636 	  "I219 V Ethernet Connection",
   1637 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1639 	  "I219 V Ethernet Connection",
   1640 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1641 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1642 	  "I219 V Ethernet Connection",
   1643 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1644 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1645 	  "I219 V Ethernet Connection",
   1646 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1647 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1648 	  "I219 V Ethernet Connection",
   1649 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1650 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1651 	  "I219 V Ethernet Connection",
   1652 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1653 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1654 	  "I219 V Ethernet Connection",
   1655 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1656 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1657 	  "I219 V Ethernet Connection",
   1658 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1659 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1660 	  "I219 V Ethernet Connection",
   1661 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1662 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1663 	  "I219 V Ethernet Connection",
   1664 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1665 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1666 	  "I219 V Ethernet Connection",
   1667 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1668 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1669 	  "I219 V Ethernet Connection",
   1670 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1671 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1672 	  "I219 V Ethernet Connection",
   1673 	  WM_T_PCH_CNP,		WMP_F_COPPER },
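         	/* End-of-table sentinel; wm_lookup() stops at the NULL name. */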
   1674 	{ 0,			0,
   1675 	  NULL,
   1676 	  0,			0 },
   1677 };
   1678 
   1679 /*
   1680  * Register read/write functions.
   1681  * Other than CSR_{READ|WRITE}().
   1682  */
   1683 
   1684 #if 0 /* Not currently used */
   1685 static inline uint32_t
   1686 wm_io_read(struct wm_softc *sc, int reg)
   1687 {
   1688 
   1689 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1690 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1691 }
   1692 #endif
   1693 
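         /*
          * I/O-mapped register access is indirect: the register offset is
          * first written to the address window at I/O offset 0, and the
          * data is then transferred through the data window at I/O offset 4.
          */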
   1694 static inline void
   1695 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1696 {
   1697 
   1698 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1699 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1700 }
   1701 
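         /*
          * Write an 8-bit value to an indirect 82575-family controller
          * register and poll until the hardware signals completion.
          */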
   1702 static inline void
   1703 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1704     uint32_t data)
   1705 {
   1706 	uint32_t regval;
   1707 	int i;
   1708 
   1709 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1710 
   1711 	CSR_WRITE(sc, reg, regval);
   1712 
   1713 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1714 		delay(5);
   1715 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1716 			break;
   1717 	}
   1718 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1719 		aprint_error("%s: WARNING:"
   1720 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1721 		    device_xname(sc->sc_dev), reg);
   1722 	}
   1723 }
   1724 
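         /*
          * Store a bus address into a two-word descriptor address field
          * in little-endian byte order.  On platforms with a 32-bit
          * bus_addr_t the high word is always zero.
          */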
   1725 static inline void
   1726 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1727 {
   1728 	wa->wa_low = htole32(v & 0xffffffffU);
   1729 	if (sizeof(bus_addr_t) == 8)
   1730 		wa->wa_high = htole32((uint64_t) v >> 32);
   1731 	else
   1732 		wa->wa_high = 0;
   1733 }
   1734 
   1735 /*
   1736  * Descriptor sync/init functions.
   1737  */
   1738 static inline void
   1739 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1740 {
   1741 	struct wm_softc *sc = txq->txq_sc;
   1742 
   1743 	/* If it will wrap around, sync to the end of the ring. */
   1744 	if ((start + num) > WM_NTXDESC(txq)) {
   1745 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1746 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1747 		    (WM_NTXDESC(txq) - start), ops);
   1748 		num -= (WM_NTXDESC(txq) - start);
   1749 		start = 0;
   1750 	}
   1751 
   1752 	/* Now sync whatever is left. */
   1753 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1754 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1755 }
   1756 
   1757 static inline void
   1758 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1759 {
   1760 	struct wm_softc *sc = rxq->rxq_sc;
   1761 
   1762 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1763 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1764 }
   1765 
   1766 static inline void
   1767 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1768 {
   1769 	struct wm_softc *sc = rxq->rxq_sc;
   1770 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1771 	struct mbuf *m = rxs->rxs_mbuf;
   1772 
   1773 	/*
   1774 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1775 	 * so that the payload after the Ethernet header is aligned
   1776 	 * to a 4-byte boundary.
    1777 	 *
   1778 	 * XXX BRAINDAMAGE ALERT!
   1779 	 * The stupid chip uses the same size for every buffer, which
   1780 	 * is set in the Receive Control register.  We are using the 2K
   1781 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1782 	 * reason, we can't "scoot" packets longer than the standard
   1783 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1784 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1785 	 * the upper layer copy the headers.
   1786 	 */
   1787 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1788 
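         	/*
         	 * Three receive descriptor layouts are in use: the 82574's
         	 * extended format, the advanced format on WM_F_NEWQUEUE
         	 * chips (82575 and later), and the original wiseman format
         	 * for everything else.
         	 */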
   1789 	if (sc->sc_type == WM_T_82574) {
   1790 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1791 		rxd->erx_data.erxd_addr =
   1792 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1793 		rxd->erx_data.erxd_dd = 0;
   1794 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1795 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1796 
   1797 		rxd->nqrx_data.nrxd_paddr =
   1798 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1799 		/* Currently, split header is not supported. */
   1800 		rxd->nqrx_data.nrxd_haddr = 0;
   1801 	} else {
   1802 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1803 
   1804 		wm_set_dma_addr(&rxd->wrx_addr,
   1805 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1806 		rxd->wrx_len = 0;
   1807 		rxd->wrx_cksum = 0;
   1808 		rxd->wrx_status = 0;
   1809 		rxd->wrx_errors = 0;
   1810 		rxd->wrx_special = 0;
   1811 	}
   1812 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1813 
   1814 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1815 }
   1816 
   1817 /*
   1818  * Device driver interface functions and commonly used functions.
   1819  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1820  */
   1821 
   1822 /* Lookup supported device table */
   1823 static const struct wm_product *
   1824 wm_lookup(const struct pci_attach_args *pa)
   1825 {
   1826 	const struct wm_product *wmp;
   1827 
   1828 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1829 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1830 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1831 			return wmp;
   1832 	}
   1833 	return NULL;
   1834 }
   1835 
   1836 /* The match function (ca_match) */
   1837 static int
   1838 wm_match(device_t parent, cfdata_t cf, void *aux)
   1839 {
   1840 	struct pci_attach_args *pa = aux;
   1841 
   1842 	if (wm_lookup(pa) != NULL)
   1843 		return 1;
   1844 
   1845 	return 0;
   1846 }
   1847 
   1848 /* The attach function (ca_attach) */
   1849 static void
   1850 wm_attach(device_t parent, device_t self, void *aux)
   1851 {
   1852 	struct wm_softc *sc = device_private(self);
   1853 	struct pci_attach_args *pa = aux;
   1854 	prop_dictionary_t dict;
   1855 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1856 	pci_chipset_tag_t pc = pa->pa_pc;
   1857 	int counts[PCI_INTR_TYPE_SIZE];
   1858 	pci_intr_type_t max_type;
   1859 	const char *eetype, *xname;
   1860 	bus_space_tag_t memt;
   1861 	bus_space_handle_t memh;
   1862 	bus_size_t memsize;
   1863 	int memh_valid;
   1864 	int i, error;
   1865 	const struct wm_product *wmp;
   1866 	prop_data_t ea;
   1867 	prop_number_t pn;
   1868 	uint8_t enaddr[ETHER_ADDR_LEN];
   1869 	char buf[256];
   1870 	char wqname[MAXCOMLEN];
   1871 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1872 	pcireg_t preg, memtype;
   1873 	uint16_t eeprom_data, apme_mask;
   1874 	bool force_clear_smbi;
   1875 	uint32_t link_mode;
   1876 	uint32_t reg;
   1877 
   1878 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1879 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1880 #endif
   1881 	sc->sc_dev = self;
   1882 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1883 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1884 	sc->sc_core_stopping = false;
   1885 
   1886 	wmp = wm_lookup(pa);
   1887 #ifdef DIAGNOSTIC
   1888 	if (wmp == NULL) {
   1889 		printf("\n");
   1890 		panic("wm_attach: impossible");
   1891 	}
   1892 #endif
   1893 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1894 
   1895 	sc->sc_pc = pa->pa_pc;
   1896 	sc->sc_pcitag = pa->pa_tag;
   1897 
   1898 	if (pci_dma64_available(pa))
   1899 		sc->sc_dmat = pa->pa_dmat64;
   1900 	else
   1901 		sc->sc_dmat = pa->pa_dmat;
   1902 
   1903 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1904 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1905 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1906 
   1907 	sc->sc_type = wmp->wmp_type;
   1908 
   1909 	/* Set default function pointers */
   1910 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1911 	sc->phy.release = sc->nvm.release = wm_put_null;
   1912 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1913 
   1914 	if (sc->sc_type < WM_T_82543) {
   1915 		if (sc->sc_rev < 2) {
   1916 			aprint_error_dev(sc->sc_dev,
   1917 			    "i82542 must be at least rev. 2\n");
   1918 			return;
   1919 		}
   1920 		if (sc->sc_rev < 3)
   1921 			sc->sc_type = WM_T_82542_2_0;
   1922 	}
   1923 
   1924 	/*
   1925 	 * Disable MSI for Errata:
   1926 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1927 	 *
   1928 	 *  82544: Errata 25
   1929 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1930 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1931 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1932 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1933 	 *
   1934 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1935 	 *
   1936 	 *  82571 & 82572: Errata 63
   1937 	 */
   1938 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1939 	    || (sc->sc_type == WM_T_82572))
   1940 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1941 
   1942 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1943 	    || (sc->sc_type == WM_T_82580)
   1944 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1945 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1946 		sc->sc_flags |= WM_F_NEWQUEUE;
   1947 
   1948 	/* Set device properties (mactype) */
   1949 	dict = device_properties(sc->sc_dev);
   1950 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1951 
   1952 	/*
    1953 	 * Map the device.  All devices support memory-mapped access,
   1954 	 * and it is really required for normal operation.
   1955 	 */
   1956 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1957 	switch (memtype) {
   1958 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1959 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1960 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1961 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1962 		break;
   1963 	default:
   1964 		memh_valid = 0;
   1965 		break;
   1966 	}
   1967 
   1968 	if (memh_valid) {
   1969 		sc->sc_st = memt;
   1970 		sc->sc_sh = memh;
   1971 		sc->sc_ss = memsize;
   1972 	} else {
   1973 		aprint_error_dev(sc->sc_dev,
   1974 		    "unable to map device registers\n");
   1975 		return;
   1976 	}
   1977 
   1978 	/*
   1979 	 * In addition, i82544 and later support I/O mapped indirect
   1980 	 * register access.  It is not desirable (nor supported in
   1981 	 * this driver) to use it for normal operation, though it is
   1982 	 * required to work around bugs in some chip versions.
   1983 	 */
   1984 	if (sc->sc_type >= WM_T_82544) {
   1985 		/* First we have to find the I/O BAR. */
   1986 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1987 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1988 			if (memtype == PCI_MAPREG_TYPE_IO)
   1989 				break;
   1990 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1991 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1992 				i += 4;	/* skip high bits, too */
   1993 		}
   1994 		if (i < PCI_MAPREG_END) {
   1995 			/*
    1996 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1997 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1998 			 * That's not a problem, because those newer chips
    1999 			 * don't have this bug.
   2000 			 *
    2001 			 * The i8254x apparently doesn't respond when the
    2002 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2003 			 * been configured.
   2004 			 */
   2005 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2006 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2007 				aprint_error_dev(sc->sc_dev,
   2008 				    "WARNING: I/O BAR at zero.\n");
   2009 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2010 					0, &sc->sc_iot, &sc->sc_ioh,
   2011 					NULL, &sc->sc_ios) == 0) {
   2012 				sc->sc_flags |= WM_F_IOH_VALID;
   2013 			} else
   2014 				aprint_error_dev(sc->sc_dev,
   2015 				    "WARNING: unable to map I/O space\n");
   2016 		}
   2017 
   2018 	}
   2019 
   2020 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2021 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2022 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2023 	if (sc->sc_type < WM_T_82542_2_1)
   2024 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2025 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2026 
   2027 	/* Power up chip */
   2028 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2029 	    && error != EOPNOTSUPP) {
   2030 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2031 		return;
   2032 	}
   2033 
   2034 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2035 	/*
    2036 	 * Don't use MSI-X if we can use only one queue, to save interrupt
    2037 	 * resources.
   2038 	 */
   2039 	if (sc->sc_nqueues > 1) {
   2040 		max_type = PCI_INTR_TYPE_MSIX;
   2041 		/*
    2042 		 * The 82583 has an MSI-X capability in the PCI configuration
    2043 		 * space, but it doesn't actually support MSI-X; at least the
    2044 		 * documentation doesn't say anything about it.
   2045 		 */
   2046 		counts[PCI_INTR_TYPE_MSIX]
   2047 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2048 	} else {
   2049 		max_type = PCI_INTR_TYPE_MSI;
   2050 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2051 	}
   2052 
   2053 	/* Allocation settings */
   2054 	counts[PCI_INTR_TYPE_MSI] = 1;
   2055 	counts[PCI_INTR_TYPE_INTX] = 1;
   2056 	/* overridden by disable flags */
   2057 	if (wm_disable_msi != 0) {
   2058 		counts[PCI_INTR_TYPE_MSI] = 0;
   2059 		if (wm_disable_msix != 0) {
   2060 			max_type = PCI_INTR_TYPE_INTX;
   2061 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2062 		}
   2063 	} else if (wm_disable_msix != 0) {
   2064 		max_type = PCI_INTR_TYPE_MSI;
   2065 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2066 	}
   2067 
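         /*
          * Allocate and establish the interrupt, falling back from MSI-X
          * to MSI to INTx (within the limits chosen above) whenever a
          * setup step fails.
          */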
   2068 alloc_retry:
   2069 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2070 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2071 		return;
   2072 	}
   2073 
   2074 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2075 		error = wm_setup_msix(sc);
   2076 		if (error) {
   2077 			pci_intr_release(pc, sc->sc_intrs,
   2078 			    counts[PCI_INTR_TYPE_MSIX]);
   2079 
   2080 			/* Setup for MSI: Disable MSI-X */
   2081 			max_type = PCI_INTR_TYPE_MSI;
   2082 			counts[PCI_INTR_TYPE_MSI] = 1;
   2083 			counts[PCI_INTR_TYPE_INTX] = 1;
   2084 			goto alloc_retry;
   2085 		}
   2086 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2087 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2088 		error = wm_setup_legacy(sc);
   2089 		if (error) {
   2090 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2091 			    counts[PCI_INTR_TYPE_MSI]);
   2092 
   2093 			/* The next try is for INTx: Disable MSI */
   2094 			max_type = PCI_INTR_TYPE_INTX;
   2095 			counts[PCI_INTR_TYPE_INTX] = 1;
   2096 			goto alloc_retry;
   2097 		}
   2098 	} else {
   2099 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2100 		error = wm_setup_legacy(sc);
   2101 		if (error) {
   2102 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2103 			    counts[PCI_INTR_TYPE_INTX]);
   2104 			return;
   2105 		}
   2106 	}
   2107 
   2108 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2109 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2110 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2111 	    WM_WORKQUEUE_FLAGS);
   2112 	if (error) {
   2113 		aprint_error_dev(sc->sc_dev,
   2114 		    "unable to create workqueue\n");
   2115 		goto out;
   2116 	}
   2117 
   2118 	/*
   2119 	 * Check the function ID (unit number of the chip).
   2120 	 */
   2121 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2122 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2123 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2124 	    || (sc->sc_type == WM_T_82580)
   2125 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2126 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2127 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2128 	else
   2129 		sc->sc_funcid = 0;
   2130 
   2131 	/*
   2132 	 * Determine a few things about the bus we're connected to.
   2133 	 */
   2134 	if (sc->sc_type < WM_T_82543) {
   2135 		/* We don't really know the bus characteristics here. */
   2136 		sc->sc_bus_speed = 33;
   2137 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2138 		/*
   2139 		 * CSA (Communication Streaming Architecture) is about as fast
    2140 		 * as a 32-bit 66MHz PCI bus.
   2141 		 */
   2142 		sc->sc_flags |= WM_F_CSA;
   2143 		sc->sc_bus_speed = 66;
   2144 		aprint_verbose_dev(sc->sc_dev,
   2145 		    "Communication Streaming Architecture\n");
   2146 		if (sc->sc_type == WM_T_82547) {
   2147 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2148 			callout_setfunc(&sc->sc_txfifo_ch,
   2149 			    wm_82547_txfifo_stall, sc);
   2150 			aprint_verbose_dev(sc->sc_dev,
   2151 			    "using 82547 Tx FIFO stall work-around\n");
   2152 		}
   2153 	} else if (sc->sc_type >= WM_T_82571) {
   2154 		sc->sc_flags |= WM_F_PCIE;
   2155 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2156 		    && (sc->sc_type != WM_T_ICH10)
   2157 		    && (sc->sc_type != WM_T_PCH)
   2158 		    && (sc->sc_type != WM_T_PCH2)
   2159 		    && (sc->sc_type != WM_T_PCH_LPT)
   2160 		    && (sc->sc_type != WM_T_PCH_SPT)
   2161 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2162 			/* ICH* and PCH* have no PCIe capability registers */
   2163 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2164 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2165 				NULL) == 0)
   2166 				aprint_error_dev(sc->sc_dev,
   2167 				    "unable to find PCIe capability\n");
   2168 		}
   2169 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2170 	} else {
   2171 		reg = CSR_READ(sc, WMREG_STATUS);
   2172 		if (reg & STATUS_BUS64)
   2173 			sc->sc_flags |= WM_F_BUS64;
   2174 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2175 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2176 
   2177 			sc->sc_flags |= WM_F_PCIX;
   2178 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2179 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2180 				aprint_error_dev(sc->sc_dev,
   2181 				    "unable to find PCIX capability\n");
   2182 			else if (sc->sc_type != WM_T_82545_3 &&
   2183 				 sc->sc_type != WM_T_82546_3) {
   2184 				/*
   2185 				 * Work around a problem caused by the BIOS
   2186 				 * setting the max memory read byte count
   2187 				 * incorrectly.
   2188 				 */
   2189 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2190 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2191 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2192 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2193 
   2194 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2195 				    PCIX_CMD_BYTECNT_SHIFT;
   2196 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2197 				    PCIX_STATUS_MAXB_SHIFT;
   2198 				if (bytecnt > maxb) {
   2199 					aprint_verbose_dev(sc->sc_dev,
   2200 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2201 					    512 << bytecnt, 512 << maxb);
   2202 					pcix_cmd = (pcix_cmd &
   2203 					    ~PCIX_CMD_BYTECNT_MASK) |
   2204 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2205 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2206 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2207 					    pcix_cmd);
   2208 				}
   2209 			}
   2210 		}
   2211 		/*
   2212 		 * The quad port adapter is special; it has a PCIX-PCIX
   2213 		 * bridge on the board, and can run the secondary bus at
   2214 		 * a higher speed.
   2215 		 */
   2216 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2217 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2218 								      : 66;
   2219 		} else if (sc->sc_flags & WM_F_PCIX) {
   2220 			switch (reg & STATUS_PCIXSPD_MASK) {
   2221 			case STATUS_PCIXSPD_50_66:
   2222 				sc->sc_bus_speed = 66;
   2223 				break;
   2224 			case STATUS_PCIXSPD_66_100:
   2225 				sc->sc_bus_speed = 100;
   2226 				break;
   2227 			case STATUS_PCIXSPD_100_133:
   2228 				sc->sc_bus_speed = 133;
   2229 				break;
   2230 			default:
   2231 				aprint_error_dev(sc->sc_dev,
   2232 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2233 				    reg & STATUS_PCIXSPD_MASK);
   2234 				sc->sc_bus_speed = 66;
   2235 				break;
   2236 			}
   2237 		} else
   2238 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2239 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2240 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2241 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2242 	}
   2243 
   2244 	/* clear interesting stat counters */
   2245 	CSR_READ(sc, WMREG_COLC);
   2246 	CSR_READ(sc, WMREG_RXERRC);
   2247 
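         /*
          * Allocate the mutexes used to serialize PHY access on 82574,
          * 82583 and ICH/PCH parts, and NVM access on ICH/PCH parts.
          */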
   2248 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2249 	    || (sc->sc_type >= WM_T_ICH8))
   2250 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2251 	if (sc->sc_type >= WM_T_ICH8)
   2252 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2253 
   2254 	/* Set PHY, NVM mutex related stuff */
   2255 	switch (sc->sc_type) {
   2256 	case WM_T_82542_2_0:
   2257 	case WM_T_82542_2_1:
   2258 	case WM_T_82543:
   2259 	case WM_T_82544:
   2260 		/* Microwire */
   2261 		sc->nvm.read = wm_nvm_read_uwire;
   2262 		sc->sc_nvm_wordsize = 64;
   2263 		sc->sc_nvm_addrbits = 6;
   2264 		break;
   2265 	case WM_T_82540:
   2266 	case WM_T_82545:
   2267 	case WM_T_82545_3:
   2268 	case WM_T_82546:
   2269 	case WM_T_82546_3:
   2270 		/* Microwire */
   2271 		sc->nvm.read = wm_nvm_read_uwire;
   2272 		reg = CSR_READ(sc, WMREG_EECD);
   2273 		if (reg & EECD_EE_SIZE) {
   2274 			sc->sc_nvm_wordsize = 256;
   2275 			sc->sc_nvm_addrbits = 8;
   2276 		} else {
   2277 			sc->sc_nvm_wordsize = 64;
   2278 			sc->sc_nvm_addrbits = 6;
   2279 		}
   2280 		sc->sc_flags |= WM_F_LOCK_EECD;
   2281 		sc->nvm.acquire = wm_get_eecd;
   2282 		sc->nvm.release = wm_put_eecd;
   2283 		break;
   2284 	case WM_T_82541:
   2285 	case WM_T_82541_2:
   2286 	case WM_T_82547:
   2287 	case WM_T_82547_2:
   2288 		reg = CSR_READ(sc, WMREG_EECD);
   2289 		/*
    2290 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2291 		 * 8254[17], so set the flags and functions before calling it.
   2292 		 */
   2293 		sc->sc_flags |= WM_F_LOCK_EECD;
   2294 		sc->nvm.acquire = wm_get_eecd;
   2295 		sc->nvm.release = wm_put_eecd;
   2296 		if (reg & EECD_EE_TYPE) {
   2297 			/* SPI */
   2298 			sc->nvm.read = wm_nvm_read_spi;
   2299 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2300 			wm_nvm_set_addrbits_size_eecd(sc);
   2301 		} else {
   2302 			/* Microwire */
   2303 			sc->nvm.read = wm_nvm_read_uwire;
   2304 			if ((reg & EECD_EE_ABITS) != 0) {
   2305 				sc->sc_nvm_wordsize = 256;
   2306 				sc->sc_nvm_addrbits = 8;
   2307 			} else {
   2308 				sc->sc_nvm_wordsize = 64;
   2309 				sc->sc_nvm_addrbits = 6;
   2310 			}
   2311 		}
   2312 		break;
   2313 	case WM_T_82571:
   2314 	case WM_T_82572:
   2315 		/* SPI */
   2316 		sc->nvm.read = wm_nvm_read_eerd;
    2317 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2318 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2319 		wm_nvm_set_addrbits_size_eecd(sc);
   2320 		sc->phy.acquire = wm_get_swsm_semaphore;
   2321 		sc->phy.release = wm_put_swsm_semaphore;
   2322 		sc->nvm.acquire = wm_get_nvm_82571;
   2323 		sc->nvm.release = wm_put_nvm_82571;
   2324 		break;
   2325 	case WM_T_82573:
   2326 	case WM_T_82574:
   2327 	case WM_T_82583:
   2328 		sc->nvm.read = wm_nvm_read_eerd;
    2329 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2330 		if (sc->sc_type == WM_T_82573) {
   2331 			sc->phy.acquire = wm_get_swsm_semaphore;
   2332 			sc->phy.release = wm_put_swsm_semaphore;
   2333 			sc->nvm.acquire = wm_get_nvm_82571;
   2334 			sc->nvm.release = wm_put_nvm_82571;
   2335 		} else {
   2336 			/* Both PHY and NVM use the same semaphore. */
   2337 			sc->phy.acquire = sc->nvm.acquire
   2338 			    = wm_get_swfwhw_semaphore;
   2339 			sc->phy.release = sc->nvm.release
   2340 			    = wm_put_swfwhw_semaphore;
   2341 		}
   2342 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2343 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2344 			sc->sc_nvm_wordsize = 2048;
   2345 		} else {
   2346 			/* SPI */
   2347 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2348 			wm_nvm_set_addrbits_size_eecd(sc);
   2349 		}
   2350 		break;
   2351 	case WM_T_82575:
   2352 	case WM_T_82576:
   2353 	case WM_T_82580:
   2354 	case WM_T_I350:
   2355 	case WM_T_I354:
   2356 	case WM_T_80003:
   2357 		/* SPI */
   2358 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2359 		wm_nvm_set_addrbits_size_eecd(sc);
   2360 		if ((sc->sc_type == WM_T_80003)
   2361 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2362 			sc->nvm.read = wm_nvm_read_eerd;
   2363 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2364 		} else {
   2365 			sc->nvm.read = wm_nvm_read_spi;
   2366 			sc->sc_flags |= WM_F_LOCK_EECD;
   2367 		}
   2368 		sc->phy.acquire = wm_get_phy_82575;
   2369 		sc->phy.release = wm_put_phy_82575;
   2370 		sc->nvm.acquire = wm_get_nvm_80003;
   2371 		sc->nvm.release = wm_put_nvm_80003;
   2372 		break;
   2373 	case WM_T_ICH8:
   2374 	case WM_T_ICH9:
   2375 	case WM_T_ICH10:
   2376 	case WM_T_PCH:
   2377 	case WM_T_PCH2:
   2378 	case WM_T_PCH_LPT:
   2379 		sc->nvm.read = wm_nvm_read_ich8;
   2380 		/* FLASH */
   2381 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2382 		sc->sc_nvm_wordsize = 2048;
   2383 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2384 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2385 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2386 			aprint_error_dev(sc->sc_dev,
   2387 			    "can't map FLASH registers\n");
   2388 			goto out;
   2389 		}
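         		/*
         		 * GFPREG holds the first and last sectors of the flash
         		 * region; convert that into a per-bank size in 16-bit
         		 * words (the region holds two NVM banks).
         		 */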
   2390 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2391 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2392 		    ICH_FLASH_SECTOR_SIZE;
   2393 		sc->sc_ich8_flash_bank_size =
   2394 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2395 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2396 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2397 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2398 		sc->sc_flashreg_offset = 0;
   2399 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2400 		sc->phy.release = wm_put_swflag_ich8lan;
   2401 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2402 		sc->nvm.release = wm_put_nvm_ich8lan;
   2403 		break;
   2404 	case WM_T_PCH_SPT:
   2405 	case WM_T_PCH_CNP:
   2406 		sc->nvm.read = wm_nvm_read_spt;
   2407 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2408 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2409 		sc->sc_flasht = sc->sc_st;
   2410 		sc->sc_flashh = sc->sc_sh;
   2411 		sc->sc_ich8_flash_base = 0;
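         		/*
         		 * Bits 5:1 of the STRAP register encode the flash size
         		 * in units of NVM_SIZE_MULTIPLIER bytes.
         		 */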
   2412 		sc->sc_nvm_wordsize =
   2413 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2414 		    * NVM_SIZE_MULTIPLIER;
    2415 		/* That's the size in bytes; we want words */
   2416 		sc->sc_nvm_wordsize /= 2;
   2417 		/* Assume 2 banks */
   2418 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2419 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2420 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2421 		sc->phy.release = wm_put_swflag_ich8lan;
   2422 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2423 		sc->nvm.release = wm_put_nvm_ich8lan;
   2424 		break;
   2425 	case WM_T_I210:
   2426 	case WM_T_I211:
    2427 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2428 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2429 		if (wm_nvm_flash_presence_i210(sc)) {
   2430 			sc->nvm.read = wm_nvm_read_eerd;
   2431 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2432 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2433 			wm_nvm_set_addrbits_size_eecd(sc);
   2434 		} else {
   2435 			sc->nvm.read = wm_nvm_read_invm;
   2436 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2437 			sc->sc_nvm_wordsize = INVM_SIZE;
   2438 		}
   2439 		sc->phy.acquire = wm_get_phy_82575;
   2440 		sc->phy.release = wm_put_phy_82575;
   2441 		sc->nvm.acquire = wm_get_nvm_80003;
   2442 		sc->nvm.release = wm_put_nvm_80003;
   2443 		break;
   2444 	default:
   2445 		break;
   2446 	}
   2447 
   2448 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2449 	switch (sc->sc_type) {
   2450 	case WM_T_82571:
   2451 	case WM_T_82572:
   2452 		reg = CSR_READ(sc, WMREG_SWSM2);
   2453 		if ((reg & SWSM2_LOCK) == 0) {
   2454 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2455 			force_clear_smbi = true;
   2456 		} else
   2457 			force_clear_smbi = false;
   2458 		break;
   2459 	case WM_T_82573:
   2460 	case WM_T_82574:
   2461 	case WM_T_82583:
   2462 		force_clear_smbi = true;
   2463 		break;
   2464 	default:
   2465 		force_clear_smbi = false;
   2466 		break;
   2467 	}
   2468 	if (force_clear_smbi) {
   2469 		reg = CSR_READ(sc, WMREG_SWSM);
   2470 		if ((reg & SWSM_SMBI) != 0)
   2471 			aprint_error_dev(sc->sc_dev,
   2472 			    "Please update the Bootagent\n");
   2473 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2474 	}
   2475 
   2476 	/*
    2477 	 * Defer printing the EEPROM type until after verifying the checksum.
   2478 	 * This allows the EEPROM type to be printed correctly in the case
   2479 	 * that no EEPROM is attached.
   2480 	 */
   2481 	/*
   2482 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2483 	 * this for later, so we can fail future reads from the EEPROM.
   2484 	 */
   2485 	if (wm_nvm_validate_checksum(sc)) {
   2486 		/*
    2487 		 * Validate the checksum a second time; some PCI-e parts
    2488 		 * fail the first check due to the link being in a sleep state.
   2489 		 */
   2490 		if (wm_nvm_validate_checksum(sc))
   2491 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2492 	}
   2493 
   2494 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2495 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2496 	else {
   2497 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2498 		    sc->sc_nvm_wordsize);
   2499 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2500 			aprint_verbose("iNVM");
   2501 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2502 			aprint_verbose("FLASH(HW)");
   2503 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2504 			aprint_verbose("FLASH");
   2505 		else {
   2506 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2507 				eetype = "SPI";
   2508 			else
   2509 				eetype = "MicroWire";
   2510 			aprint_verbose("(%d address bits) %s EEPROM",
   2511 			    sc->sc_nvm_addrbits, eetype);
   2512 		}
   2513 	}
   2514 	wm_nvm_version(sc);
   2515 	aprint_verbose("\n");
   2516 
   2517 	/*
    2518 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
    2519 	 * might be incorrect.
   2520 	 */
   2521 	wm_gmii_setup_phytype(sc, 0, 0);
   2522 
   2523 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2524 	switch (sc->sc_type) {
   2525 	case WM_T_ICH8:
   2526 	case WM_T_ICH9:
   2527 	case WM_T_ICH10:
   2528 	case WM_T_PCH:
   2529 	case WM_T_PCH2:
   2530 	case WM_T_PCH_LPT:
   2531 	case WM_T_PCH_SPT:
   2532 	case WM_T_PCH_CNP:
   2533 		apme_mask = WUC_APME;
   2534 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2535 		if ((eeprom_data & apme_mask) != 0)
   2536 			sc->sc_flags |= WM_F_WOL;
   2537 		break;
   2538 	default:
   2539 		break;
   2540 	}
   2541 
   2542 	/* Reset the chip to a known state. */
   2543 	wm_reset(sc);
   2544 
   2545 	/*
   2546 	 * Check for I21[01] PLL workaround.
   2547 	 *
   2548 	 * Three cases:
   2549 	 * a) Chip is I211.
   2550 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2551 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2552 	 */
   2553 	if (sc->sc_type == WM_T_I211)
   2554 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2555 	if (sc->sc_type == WM_T_I210) {
   2556 		if (!wm_nvm_flash_presence_i210(sc))
   2557 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2558 		else if ((sc->sc_nvm_ver_major < 3)
   2559 		    || ((sc->sc_nvm_ver_major == 3)
   2560 			&& (sc->sc_nvm_ver_minor < 25))) {
   2561 			aprint_verbose_dev(sc->sc_dev,
   2562 			    "ROM image version %d.%d is older than 3.25\n",
   2563 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2564 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2565 		}
   2566 	}
   2567 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2568 		wm_pll_workaround_i210(sc);
   2569 
   2570 	wm_get_wakeup(sc);
   2571 
   2572 	/* Non-AMT based hardware can now take control from firmware */
   2573 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2574 		wm_get_hw_control(sc);
   2575 
   2576 	/*
   2577 	 * Read the Ethernet address from the EEPROM, if not first found
   2578 	 * in device properties.
   2579 	 */
   2580 	ea = prop_dictionary_get(dict, "mac-address");
   2581 	if (ea != NULL) {
   2582 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2583 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2584 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2585 	} else {
   2586 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2587 			aprint_error_dev(sc->sc_dev,
   2588 			    "unable to read Ethernet address\n");
   2589 			goto out;
   2590 		}
   2591 	}
   2592 
   2593 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2594 	    ether_sprintf(enaddr));
   2595 
   2596 	/*
   2597 	 * Read the config info from the EEPROM, and set up various
   2598 	 * bits in the control registers based on their contents.
   2599 	 */
   2600 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2601 	if (pn != NULL) {
   2602 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2603 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2604 	} else {
   2605 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2606 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2607 			goto out;
   2608 		}
   2609 	}
   2610 
   2611 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2612 	if (pn != NULL) {
   2613 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2614 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2615 	} else {
   2616 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2617 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2618 			goto out;
   2619 		}
   2620 	}
   2621 
   2622 	/* check for WM_F_WOL */
   2623 	switch (sc->sc_type) {
   2624 	case WM_T_82542_2_0:
   2625 	case WM_T_82542_2_1:
   2626 	case WM_T_82543:
   2627 		/* dummy? */
   2628 		eeprom_data = 0;
   2629 		apme_mask = NVM_CFG3_APME;
   2630 		break;
   2631 	case WM_T_82544:
   2632 		apme_mask = NVM_CFG2_82544_APM_EN;
   2633 		eeprom_data = cfg2;
   2634 		break;
   2635 	case WM_T_82546:
   2636 	case WM_T_82546_3:
   2637 	case WM_T_82571:
   2638 	case WM_T_82572:
   2639 	case WM_T_82573:
   2640 	case WM_T_82574:
   2641 	case WM_T_82583:
   2642 	case WM_T_80003:
   2643 	case WM_T_82575:
   2644 	case WM_T_82576:
   2645 		apme_mask = NVM_CFG3_APME;
   2646 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2647 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2648 		break;
   2649 	case WM_T_82580:
   2650 	case WM_T_I350:
   2651 	case WM_T_I354:
   2652 	case WM_T_I210:
   2653 	case WM_T_I211:
   2654 		apme_mask = NVM_CFG3_APME;
   2655 		wm_nvm_read(sc,
   2656 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2657 		    1, &eeprom_data);
   2658 		break;
   2659 	case WM_T_ICH8:
   2660 	case WM_T_ICH9:
   2661 	case WM_T_ICH10:
   2662 	case WM_T_PCH:
   2663 	case WM_T_PCH2:
   2664 	case WM_T_PCH_LPT:
   2665 	case WM_T_PCH_SPT:
   2666 	case WM_T_PCH_CNP:
    2667 		/* Already checked before wm_reset() */
   2668 		apme_mask = eeprom_data = 0;
   2669 		break;
   2670 	default: /* XXX 82540 */
   2671 		apme_mask = NVM_CFG3_APME;
   2672 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2673 		break;
   2674 	}
    2675 	/* Check for the WM_F_WOL flag now that the EEPROM has been read */
   2676 	if ((eeprom_data & apme_mask) != 0)
   2677 		sc->sc_flags |= WM_F_WOL;
   2678 
   2679 	/*
    2680 	 * We have the EEPROM settings; now apply the special cases
    2681 	 * where the EEPROM may be wrong or the board won't support
    2682 	 * wake on LAN on a particular port.
   2683 	 */
   2684 	switch (sc->sc_pcidevid) {
   2685 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2686 		sc->sc_flags &= ~WM_F_WOL;
   2687 		break;
   2688 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2689 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2690 		/* Wake events only supported on port A for dual fiber
   2691 		 * regardless of eeprom setting */
   2692 		if (sc->sc_funcid == 1)
   2693 			sc->sc_flags &= ~WM_F_WOL;
   2694 		break;
   2695 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2696 		/* If quad port adapter, disable WoL on all but port A */
   2697 		if (sc->sc_funcid != 0)
   2698 			sc->sc_flags &= ~WM_F_WOL;
   2699 		break;
   2700 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2701 		/* Wake events only supported on port A for dual fiber
   2702 		 * regardless of eeprom setting */
   2703 		if (sc->sc_funcid == 1)
   2704 			sc->sc_flags &= ~WM_F_WOL;
   2705 		break;
   2706 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2707 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2708 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2709 		/* If quad port adapter, disable WoL on all but port A */
   2710 		if (sc->sc_funcid != 0)
   2711 			sc->sc_flags &= ~WM_F_WOL;
   2712 		break;
   2713 	}
   2714 
   2715 	if (sc->sc_type >= WM_T_82575) {
   2716 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2717 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2718 			    nvmword);
   2719 			if ((sc->sc_type == WM_T_82575) ||
   2720 			    (sc->sc_type == WM_T_82576)) {
   2721 				/* Check NVM for autonegotiation */
   2722 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2723 				    != 0)
   2724 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2725 			}
   2726 			if ((sc->sc_type == WM_T_82575) ||
   2727 			    (sc->sc_type == WM_T_I350)) {
   2728 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2729 					sc->sc_flags |= WM_F_MAS;
   2730 			}
   2731 		}
   2732 	}
   2733 
   2734 	/*
    2735 	 * XXX need special handling for some multiple-port cards
    2736 	 * to disable a particular port.
   2737 	 */
   2738 
   2739 	if (sc->sc_type >= WM_T_82544) {
   2740 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2741 		if (pn != NULL) {
   2742 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2743 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2744 		} else {
   2745 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2746 				aprint_error_dev(sc->sc_dev,
   2747 				    "unable to read SWDPIN\n");
   2748 				goto out;
   2749 			}
   2750 		}
   2751 	}
   2752 
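         	/* ILOS: Invert Loss-Of-Signal polarity. */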
   2753 	if (cfg1 & NVM_CFG1_ILOS)
   2754 		sc->sc_ctrl |= CTRL_ILOS;
   2755 
   2756 	/*
   2757 	 * XXX
    2758 	 * This code isn't correct because pins 2 and 3 are located
    2759 	 * at different positions on newer chips. Check all datasheets.
    2760 	 *
    2761 	 * Until this problem is resolved, only do this for chips <= 82580.
   2762 	 */
   2763 	if (sc->sc_type <= WM_T_82580) {
   2764 		if (sc->sc_type >= WM_T_82544) {
   2765 			sc->sc_ctrl |=
   2766 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2767 			    CTRL_SWDPIO_SHIFT;
   2768 			sc->sc_ctrl |=
   2769 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2770 			    CTRL_SWDPINS_SHIFT;
   2771 		} else {
   2772 			sc->sc_ctrl |=
   2773 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2774 			    CTRL_SWDPIO_SHIFT;
   2775 		}
   2776 	}
   2777 
   2778 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2779 		wm_nvm_read(sc,
   2780 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2781 		    1, &nvmword);
   2782 		if (nvmword & NVM_CFG3_ILOS)
   2783 			sc->sc_ctrl |= CTRL_ILOS;
   2784 	}
   2785 
   2786 #if 0
   2787 	if (sc->sc_type >= WM_T_82544) {
   2788 		if (cfg1 & NVM_CFG1_IPS0)
   2789 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2790 		if (cfg1 & NVM_CFG1_IPS1)
   2791 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2792 		sc->sc_ctrl_ext |=
   2793 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2794 		    CTRL_EXT_SWDPIO_SHIFT;
   2795 		sc->sc_ctrl_ext |=
   2796 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2797 		    CTRL_EXT_SWDPINS_SHIFT;
   2798 	} else {
   2799 		sc->sc_ctrl_ext |=
   2800 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2801 		    CTRL_EXT_SWDPIO_SHIFT;
   2802 	}
   2803 #endif
   2804 
   2805 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2806 #if 0
   2807 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2808 #endif
   2809 
   2810 	if (sc->sc_type == WM_T_PCH) {
   2811 		uint16_t val;
   2812 
   2813 		/* Save the NVM K1 bit setting */
   2814 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2815 
   2816 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2817 			sc->sc_nvm_k1_enabled = 1;
   2818 		else
   2819 			sc->sc_nvm_k1_enabled = 0;
   2820 	}
   2821 
   2822 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2823 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2824 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2825 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2826 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2827 	    || sc->sc_type == WM_T_82573
   2828 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2829 		/* Copper only */
   2830 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2831 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2832 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2833 	    || (sc->sc_type == WM_T_I211)) {
   2834 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2835 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2836 		switch (link_mode) {
   2837 		case CTRL_EXT_LINK_MODE_1000KX:
   2838 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2839 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2840 			break;
   2841 		case CTRL_EXT_LINK_MODE_SGMII:
   2842 			if (wm_sgmii_uses_mdio(sc)) {
   2843 				aprint_normal_dev(sc->sc_dev,
   2844 				    "SGMII(MDIO)\n");
   2845 				sc->sc_flags |= WM_F_SGMII;
   2846 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2847 				break;
   2848 			}
   2849 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2850 			/*FALLTHROUGH*/
   2851 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2852 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2853 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2854 				if (link_mode
   2855 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2856 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2857 					sc->sc_flags |= WM_F_SGMII;
   2858 					aprint_verbose_dev(sc->sc_dev,
   2859 					    "SGMII\n");
   2860 				} else {
   2861 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2862 					aprint_verbose_dev(sc->sc_dev,
   2863 					    "SERDES\n");
   2864 				}
   2865 				break;
   2866 			}
   2867 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2868 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2869 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2870 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2871 				sc->sc_flags |= WM_F_SGMII;
   2872 			}
   2873 			/* Do not change link mode for 100BaseFX */
   2874 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2875 				break;
   2876 
   2877 			/* Change current link mode setting */
   2878 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2879 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2880 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2881 			else
   2882 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2883 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2884 			break;
   2885 		case CTRL_EXT_LINK_MODE_GMII:
   2886 		default:
   2887 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2888 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2889 			break;
   2890 		}
   2891 
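         		/*
         		 * The I2C interface is used to access SFP modules, so
         		 * keep it enabled only while the port is in SGMII mode.
         		 */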
    2893 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2894 			reg |= CTRL_EXT_I2C_ENA;
    2895 		else
    2896 			reg &= ~CTRL_EXT_I2C_ENA;
   2897 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2898 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2899 			if (!wm_sgmii_uses_mdio(sc))
   2900 				wm_gmii_setup_phytype(sc, 0, 0);
   2901 			wm_reset_mdicnfg_82580(sc);
   2902 		}
   2903 	} else if (sc->sc_type < WM_T_82543 ||
   2904 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2905 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2906 			aprint_error_dev(sc->sc_dev,
   2907 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2908 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2909 		}
   2910 	} else {
   2911 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2912 			aprint_error_dev(sc->sc_dev,
   2913 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2914 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2915 		}
   2916 	}
   2917 
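         	/* EEE: Energy Efficient Ethernet (IEEE 802.3az) */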
   2918 	if (sc->sc_type >= WM_T_PCH2)
   2919 		sc->sc_flags |= WM_F_EEE;
   2920 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2921 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2922 		/* XXX: Need special handling for I354. (not yet) */
   2923 		if (sc->sc_type != WM_T_I354)
   2924 			sc->sc_flags |= WM_F_EEE;
   2925 	}
   2926 
   2927 	/*
   2928 	 * The I350 has a bug where it always strips the CRC whether
    2929 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   2930 	 */
   2931 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2932 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2933 		sc->sc_flags |= WM_F_CRC_STRIP;
   2934 
   2935 	/* Set device properties (macflags) */
   2936 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2937 
   2938 	if (sc->sc_flags != 0) {
   2939 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2940 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2941 	}
   2942 
   2943 #ifdef WM_MPSAFE
   2944 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2945 #else
   2946 	sc->sc_core_lock = NULL;
   2947 #endif
   2948 
   2949 	/* Initialize the media structures accordingly. */
   2950 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2951 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2952 	else
   2953 		wm_tbi_mediainit(sc); /* All others */
   2954 
   2955 	ifp = &sc->sc_ethercom.ec_if;
   2956 	xname = device_xname(sc->sc_dev);
   2957 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2958 	ifp->if_softc = sc;
   2959 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2960 #ifdef WM_MPSAFE
   2961 	ifp->if_extflags = IFEF_MPSAFE;
   2962 #endif
   2963 	ifp->if_ioctl = wm_ioctl;
   2964 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2965 		ifp->if_start = wm_nq_start;
   2966 		/*
   2967 		 * When the number of CPUs is one and the controller can use
    2968 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2969 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2970 		 * the other for link status changes.
   2971 		 * In this situation, wm_nq_transmit() is disadvantageous
   2972 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2973 		 */
   2974 		if (wm_is_using_multiqueue(sc))
   2975 			ifp->if_transmit = wm_nq_transmit;
   2976 	} else {
   2977 		ifp->if_start = wm_start;
   2978 		/*
    2979 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2980 		 */
   2981 		if (wm_is_using_multiqueue(sc))
   2982 			ifp->if_transmit = wm_transmit;
   2983 	}
    2984 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as a watchdog. */
   2985 	ifp->if_init = wm_init;
   2986 	ifp->if_stop = wm_stop;
   2987 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2988 	IFQ_SET_READY(&ifp->if_snd);
   2989 
   2990 	/* Check for jumbo frame */
   2991 	switch (sc->sc_type) {
   2992 	case WM_T_82573:
   2993 		/* XXX limited to 9234 if ASPM is disabled */
   2994 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2995 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2996 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2997 		break;
   2998 	case WM_T_82571:
   2999 	case WM_T_82572:
   3000 	case WM_T_82574:
   3001 	case WM_T_82583:
   3002 	case WM_T_82575:
   3003 	case WM_T_82576:
   3004 	case WM_T_82580:
   3005 	case WM_T_I350:
   3006 	case WM_T_I354:
   3007 	case WM_T_I210:
   3008 	case WM_T_I211:
   3009 	case WM_T_80003:
   3010 	case WM_T_ICH9:
   3011 	case WM_T_ICH10:
   3012 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3013 	case WM_T_PCH_LPT:
   3014 	case WM_T_PCH_SPT:
   3015 	case WM_T_PCH_CNP:
   3016 		/* XXX limited to 9234 */
   3017 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3018 		break;
   3019 	case WM_T_PCH:
   3020 		/* XXX limited to 4096 */
   3021 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3022 		break;
   3023 	case WM_T_82542_2_0:
   3024 	case WM_T_82542_2_1:
   3025 	case WM_T_ICH8:
   3026 		/* No support for jumbo frame */
   3027 		break;
   3028 	default:
   3029 		/* ETHER_MAX_LEN_JUMBO */
   3030 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3031 		break;
   3032 	}
   3033 
    3034 	/* If we're an i82543 or greater, we can support VLANs. */
   3035 	if (sc->sc_type >= WM_T_82543) {
   3036 		sc->sc_ethercom.ec_capabilities |=
   3037 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3038 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3039 	}
   3040 
   3041 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3042 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3043 
   3044 	/*
    3045 	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
   3046 	 * on i82543 and later.
   3047 	 */
   3048 	if (sc->sc_type >= WM_T_82543) {
   3049 		ifp->if_capabilities |=
   3050 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3051 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3052 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3053 		    IFCAP_CSUM_TCPv6_Tx |
   3054 		    IFCAP_CSUM_UDPv6_Tx;
   3055 	}
   3056 
   3057 	/*
    3058 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3059 	 *
   3060 	 *	82541GI (8086:1076) ... no
   3061 	 *	82572EI (8086:10b9) ... yes
   3062 	 */
   3063 	if (sc->sc_type >= WM_T_82571) {
   3064 		ifp->if_capabilities |=
   3065 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3066 	}
   3067 
   3068 	/*
    3069 	 * If we're an i82544 or greater (except i82547), we can do
   3070 	 * TCP segmentation offload.
   3071 	 */
   3072 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3073 		ifp->if_capabilities |= IFCAP_TSOv4;
   3074 	}
   3075 
   3076 	if (sc->sc_type >= WM_T_82571) {
   3077 		ifp->if_capabilities |= IFCAP_TSOv6;
   3078 	}
   3079 
   3080 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3081 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3082 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3083 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3084 
   3085 	/* Attach the interface. */
   3086 	error = if_initialize(ifp);
   3087 	if (error != 0) {
   3088 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3089 		    error);
   3090 		return; /* Error */
   3091 	}
   3092 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3093 	ether_ifattach(ifp, enaddr);
   3094 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3095 	if_register(ifp);
   3096 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3097 	    RND_FLAG_DEFAULT);
   3098 
   3099 #ifdef WM_EVENT_COUNTERS
   3100 	/* Attach event counters. */
   3101 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3102 	    NULL, xname, "linkintr");
   3103 
   3104 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3105 	    NULL, xname, "tx_xoff");
   3106 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3107 	    NULL, xname, "tx_xon");
   3108 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3109 	    NULL, xname, "rx_xoff");
   3110 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3111 	    NULL, xname, "rx_xon");
   3112 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3113 	    NULL, xname, "rx_macctl");
   3114 #endif /* WM_EVENT_COUNTERS */
   3115 
   3116 	sc->sc_txrx_use_workqueue = false;
   3117 
   3118 	if (wm_phy_need_linkdown_discard(sc))
   3119 		wm_set_linkdown_discard(sc);
   3120 
   3121 	wm_init_sysctls(sc);
   3122 
   3123 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3124 		pmf_class_network_register(self, ifp);
   3125 	else
   3126 		aprint_error_dev(self, "couldn't establish power handler\n");
   3127 
   3128 	sc->sc_flags |= WM_F_ATTACHED;
   3129 out:
   3130 	return;
   3131 }
   3132 
   3133 /* The detach function (ca_detach) */
   3134 static int
   3135 wm_detach(device_t self, int flags __unused)
   3136 {
   3137 	struct wm_softc *sc = device_private(self);
   3138 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3139 	int i;
   3140 
   3141 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3142 		return 0;
   3143 
   3144 	/* Stop the interface. Callouts are stopped in it. */
   3145 	wm_stop(ifp, 1);
   3146 
   3147 	pmf_device_deregister(self);
   3148 
   3149 	sysctl_teardown(&sc->sc_sysctllog);
   3150 
   3151 #ifdef WM_EVENT_COUNTERS
   3152 	evcnt_detach(&sc->sc_ev_linkintr);
   3153 
   3154 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3155 	evcnt_detach(&sc->sc_ev_tx_xon);
   3156 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3157 	evcnt_detach(&sc->sc_ev_rx_xon);
   3158 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3159 #endif /* WM_EVENT_COUNTERS */
   3160 
   3161 	rnd_detach_source(&sc->rnd_source);
   3162 
   3163 	/* Tell the firmware about the release */
   3164 	WM_CORE_LOCK(sc);
   3165 	wm_release_manageability(sc);
   3166 	wm_release_hw_control(sc);
   3167 	wm_enable_wakeup(sc);
   3168 	WM_CORE_UNLOCK(sc);
   3169 
   3170 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3171 
   3172 	ether_ifdetach(ifp);
   3173 	if_detach(ifp);
   3174 	if_percpuq_destroy(sc->sc_ipq);
   3175 
   3176 	/* Delete all remaining media. */
   3177 	ifmedia_fini(&sc->sc_mii.mii_media);
   3178 
   3179 	/* Unload RX dmamaps and free mbufs */
   3180 	for (i = 0; i < sc->sc_nqueues; i++) {
   3181 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3182 		mutex_enter(rxq->rxq_lock);
   3183 		wm_rxdrain(rxq);
   3184 		mutex_exit(rxq->rxq_lock);
   3185 	}
   3186 	/* Must unlock here */
   3187 
   3188 	/* Disestablish the interrupt handler */
   3189 	for (i = 0; i < sc->sc_nintrs; i++) {
   3190 		if (sc->sc_ihs[i] != NULL) {
   3191 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3192 			sc->sc_ihs[i] = NULL;
   3193 		}
   3194 	}
   3195 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3196 
    3197 	/* wm_stop() ensures the workqueue is stopped. */
   3198 	workqueue_destroy(sc->sc_queue_wq);
   3199 
   3200 	for (i = 0; i < sc->sc_nqueues; i++)
   3201 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3202 
   3203 	wm_free_txrx_queues(sc);
   3204 
   3205 	/* Unmap the registers */
   3206 	if (sc->sc_ss) {
   3207 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3208 		sc->sc_ss = 0;
   3209 	}
   3210 	if (sc->sc_ios) {
   3211 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3212 		sc->sc_ios = 0;
   3213 	}
   3214 	if (sc->sc_flashs) {
   3215 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3216 		sc->sc_flashs = 0;
   3217 	}
   3218 
   3219 	if (sc->sc_core_lock)
   3220 		mutex_obj_free(sc->sc_core_lock);
   3221 	if (sc->sc_ich_phymtx)
   3222 		mutex_obj_free(sc->sc_ich_phymtx);
   3223 	if (sc->sc_ich_nvmmtx)
   3224 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3225 
   3226 	return 0;
   3227 }
   3228 
   3229 static bool
   3230 wm_suspend(device_t self, const pmf_qual_t *qual)
   3231 {
   3232 	struct wm_softc *sc = device_private(self);
   3233 
   3234 	wm_release_manageability(sc);
   3235 	wm_release_hw_control(sc);
   3236 	wm_enable_wakeup(sc);
   3237 
   3238 	return true;
   3239 }
   3240 
   3241 static bool
   3242 wm_resume(device_t self, const pmf_qual_t *qual)
   3243 {
   3244 	struct wm_softc *sc = device_private(self);
   3245 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3246 	pcireg_t reg;
   3247 	char buf[256];
   3248 
   3249 	reg = CSR_READ(sc, WMREG_WUS);
   3250 	if (reg != 0) {
   3251 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3252 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3253 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3254 	}
   3255 
   3256 	if (sc->sc_type >= WM_T_PCH2)
   3257 		wm_resume_workarounds_pchlan(sc);
   3258 	if ((ifp->if_flags & IFF_UP) == 0) {
   3259 		wm_reset(sc);
   3260 		/* Non-AMT based hardware can now take control from firmware */
   3261 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3262 			wm_get_hw_control(sc);
   3263 		wm_init_manageability(sc);
   3264 	} else {
   3265 		/*
   3266 		 * We called pmf_class_network_register(), so if_init() is
   3267 		 * automatically called when IFF_UP. wm_reset(),
   3268 		 * wm_get_hw_control() and wm_init_manageability() are called
   3269 		 * via wm_init().
   3270 		 */
   3271 	}
   3272 
   3273 	return true;
   3274 }
   3275 
   3276 /*
   3277  * wm_watchdog:		[ifnet interface function]
   3278  *
   3279  *	Watchdog timer handler.
   3280  */
   3281 static void
   3282 wm_watchdog(struct ifnet *ifp)
   3283 {
   3284 	int qid;
   3285 	struct wm_softc *sc = ifp->if_softc;
    3286 	uint16_t hang_queue = 0; /* One bit per queue; wm(4)'s max is 16 (82576). */
   3287 
   3288 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3289 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3290 
   3291 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3292 	}
   3293 
    3294 	/* If any of the queues hung up, reset the interface. */
   3295 	if (hang_queue != 0) {
   3296 		(void)wm_init(ifp);
   3297 
   3298 		/*
    3299 		 * There is still some upper-layer processing which calls
    3300 		 * ifp->if_start(), e.g. ALTQ or single-CPU systems.
   3301 		 */
   3302 		/* Try to get more packets going. */
   3303 		ifp->if_start(ifp);
   3304 	}
   3305 }
   3306 
   3307 
   3308 static void
   3309 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3310 {
   3311 
   3312 	mutex_enter(txq->txq_lock);
   3313 	if (txq->txq_sending &&
   3314 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3315 		wm_watchdog_txq_locked(ifp, txq, hang);
   3316 
   3317 	mutex_exit(txq->txq_lock);
   3318 }
   3319 
   3320 static void
   3321 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3322     uint16_t *hang)
   3323 {
   3324 	struct wm_softc *sc = ifp->if_softc;
   3325 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3326 
   3327 	KASSERT(mutex_owned(txq->txq_lock));
   3328 
   3329 	/*
   3330 	 * Since we're using delayed interrupts, sweep up
   3331 	 * before we report an error.
   3332 	 */
   3333 	wm_txeof(txq, UINT_MAX);
   3334 
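         	/* If the queue is still sending after the sweep, mark it hung. */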
   3335 	if (txq->txq_sending)
   3336 		*hang |= __BIT(wmq->wmq_id);
   3337 
   3338 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3339 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3340 		    device_xname(sc->sc_dev));
   3341 	} else {
   3342 #ifdef WM_DEBUG
   3343 		int i, j;
   3344 		struct wm_txsoft *txs;
   3345 #endif
   3346 		log(LOG_ERR,
   3347 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3348 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3349 		    txq->txq_next);
   3350 		if_statinc(ifp, if_oerrors);
   3351 #ifdef WM_DEBUG
   3352 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3353 		    i = WM_NEXTTXS(txq, i)) {
   3354 			txs = &txq->txq_soft[i];
   3355 			printf("txs %d tx %d -> %d\n",
   3356 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3357 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3358 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3359 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3360 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3361 					printf("\t %#08x%08x\n",
   3362 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3363 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3364 				} else {
   3365 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3366 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3367 					    txq->txq_descs[j].wtx_addr.wa_low);
   3368 					printf("\t %#04x%02x%02x%08x\n",
   3369 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3370 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3371 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3372 					    txq->txq_descs[j].wtx_cmdlen);
   3373 				}
   3374 				if (j == txs->txs_lastdesc)
   3375 					break;
   3376 			}
   3377 		}
   3378 #endif
   3379 	}
   3380 }
   3381 
   3382 /*
   3383  * wm_tick:
   3384  *
   3385  *	One second timer, used to check link status, sweep up
   3386  *	completed transmit jobs, etc.
   3387  */
   3388 static void
   3389 wm_tick(void *arg)
   3390 {
   3391 	struct wm_softc *sc = arg;
   3392 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3393 #ifndef WM_MPSAFE
   3394 	int s = splnet();
   3395 #endif
   3396 
   3397 	WM_CORE_LOCK(sc);
   3398 
   3399 	if (sc->sc_core_stopping) {
   3400 		WM_CORE_UNLOCK(sc);
   3401 #ifndef WM_MPSAFE
   3402 		splx(s);
   3403 #endif
   3404 		return;
   3405 	}
   3406 
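         	/*
         	 * These statistics registers are clear-on-read; accumulate
         	 * their values into the event counters.
         	 */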
   3407 	if (sc->sc_type >= WM_T_82542_2_1) {
   3408 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3409 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3410 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3411 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3412 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3413 	}
   3414 
   3415 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3416 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3417 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3418 	    + CSR_READ(sc, WMREG_CRCERRS)
   3419 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3420 	    + CSR_READ(sc, WMREG_SYMERRC)
   3421 	    + CSR_READ(sc, WMREG_RXERRC)
   3422 	    + CSR_READ(sc, WMREG_SEC)
   3423 	    + CSR_READ(sc, WMREG_CEXTERR)
   3424 	    + CSR_READ(sc, WMREG_RLEC));
   3425 	/*
    3426 	 * WMREG_RNBC is incremented when no receive buffers are available
    3427 	 * in host memory. It is not the number of dropped packets, because
    3428 	 * the ethernet controller can still receive packets in that case
    3429 	 * as long as there is space in the PHY's FIFO.
    3430 	 *
    3431 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
    3432 	 * instead of if_iqdrops.
   3433 	 */
   3434 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3435 	IF_STAT_PUTREF(ifp);
   3436 
   3437 	if (sc->sc_flags & WM_F_HAS_MII)
   3438 		mii_tick(&sc->sc_mii);
   3439 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3440 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3441 		wm_serdes_tick(sc);
   3442 	else
   3443 		wm_tbi_tick(sc);
   3444 
   3445 	WM_CORE_UNLOCK(sc);
   3446 
   3447 	wm_watchdog(ifp);
   3448 
   3449 	callout_schedule(&sc->sc_tick_ch, hz);
   3450 }
   3451 
   3452 static int
   3453 wm_ifflags_cb(struct ethercom *ec)
   3454 {
   3455 	struct ifnet *ifp = &ec->ec_if;
   3456 	struct wm_softc *sc = ifp->if_softc;
   3457 	u_short iffchange;
   3458 	int ecchange;
   3459 	bool needreset = false;
   3460 	int rc = 0;
   3461 
   3462 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3463 		device_xname(sc->sc_dev), __func__));
   3464 
   3465 	WM_CORE_LOCK(sc);
   3466 
   3467 	/*
   3468 	 * Check for if_flags.
   3469 	 * Main usage is to prevent linkdown when opening bpf.
   3470 	 */
   3471 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3472 	sc->sc_if_flags = ifp->if_flags;
   3473 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3474 		needreset = true;
   3475 		goto ec;
   3476 	}
   3477 
   3478 	/* iff related updates */
   3479 	if ((iffchange & IFF_PROMISC) != 0)
   3480 		wm_set_filter(sc);
   3481 
   3482 	wm_set_vlan(sc);
   3483 
   3484 ec:
   3485 	/* Check for ec_capenable. */
   3486 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3487 	sc->sc_ec_capenable = ec->ec_capenable;
   3488 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3489 		needreset = true;
   3490 		goto out;
   3491 	}
   3492 
   3493 	/* ec related updates */
   3494 	wm_set_eee(sc);
   3495 
   3496 out:
   3497 	if (needreset)
   3498 		rc = ENETRESET;
   3499 	WM_CORE_UNLOCK(sc);
   3500 
   3501 	return rc;
   3502 }
   3503 
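         /*
          * For these PHYs, packets queued while the link is down should be
          * discarded rather than sent once the link comes back up; see
          * wm_set_linkdown_discard() and WM_TXQ_LINKDOWN_DISCARD below.
          * (XXX rationale inferred from usage.)
          */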
   3504 static bool
   3505 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3506 {
   3507 
    3508 	switch (sc->sc_phytype) {
   3509 	case WMPHY_82577: /* ihphy */
   3510 	case WMPHY_82578: /* atphy */
   3511 	case WMPHY_82579: /* ihphy */
   3512 	case WMPHY_I217: /* ihphy */
   3513 	case WMPHY_82580: /* ihphy */
   3514 	case WMPHY_I350: /* ihphy */
   3515 		return true;
   3516 	default:
   3517 		return false;
   3518 	}
   3519 }
   3520 
   3521 static void
   3522 wm_set_linkdown_discard(struct wm_softc *sc)
   3523 {
   3524 
   3525 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3526 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3527 
   3528 		mutex_enter(txq->txq_lock);
   3529 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3530 		mutex_exit(txq->txq_lock);
   3531 	}
   3532 }
   3533 
   3534 static void
   3535 wm_clear_linkdown_discard(struct wm_softc *sc)
   3536 {
   3537 
   3538 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3539 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3540 
   3541 		mutex_enter(txq->txq_lock);
   3542 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3543 		mutex_exit(txq->txq_lock);
   3544 	}
   3545 }
   3546 
   3547 /*
   3548  * wm_ioctl:		[ifnet interface function]
   3549  *
   3550  *	Handle control requests from the operator.
   3551  */
   3552 static int
   3553 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3554 {
   3555 	struct wm_softc *sc = ifp->if_softc;
   3556 	struct ifreq *ifr = (struct ifreq *)data;
   3557 	struct ifaddr *ifa = (struct ifaddr *)data;
   3558 	struct sockaddr_dl *sdl;
   3559 	int s, error;
   3560 
   3561 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3562 		device_xname(sc->sc_dev), __func__));
   3563 
   3564 #ifndef WM_MPSAFE
   3565 	s = splnet();
   3566 #endif
   3567 	switch (cmd) {
   3568 	case SIOCSIFMEDIA:
   3569 		WM_CORE_LOCK(sc);
   3570 		/* Flow control requires full-duplex mode. */
   3571 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3572 		    (ifr->ifr_media & IFM_FDX) == 0)
   3573 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3574 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3575 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3576 				/* We can do both TXPAUSE and RXPAUSE. */
   3577 				ifr->ifr_media |=
   3578 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3579 			}
   3580 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3581 		}
   3582 		WM_CORE_UNLOCK(sc);
   3583 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3584 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3585 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
   3586 				wm_set_linkdown_discard(sc);
   3587 			else
   3588 				wm_clear_linkdown_discard(sc);
   3589 		}
   3590 		break;
   3591 	case SIOCINITIFADDR:
   3592 		WM_CORE_LOCK(sc);
   3593 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3594 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3595 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3596 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3597 			/* Unicast address is the first multicast entry */
   3598 			wm_set_filter(sc);
   3599 			error = 0;
   3600 			WM_CORE_UNLOCK(sc);
   3601 			break;
   3602 		}
   3603 		WM_CORE_UNLOCK(sc);
   3604 		if (((ifp->if_flags & IFF_UP) == 0) && wm_phy_need_linkdown_discard(sc))
   3605 			wm_clear_linkdown_discard(sc);
   3606 		/*FALLTHROUGH*/
   3607 	default:
   3608 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   3609 			if (((ifp->if_flags & IFF_UP) == 0) && ((ifr->ifr_flags & IFF_UP) != 0)) {
   3610 				wm_clear_linkdown_discard(sc);
   3611 			} else if (((ifp->if_flags & IFF_UP) != 0) && ((ifr->ifr_flags & IFF_UP) == 0)) {
   3612 				wm_set_linkdown_discard(sc);
   3613 			}
   3614 		}
   3615 #ifdef WM_MPSAFE
   3616 		s = splnet();
   3617 #endif
   3618 		/* It may call wm_start, so unlock here */
   3619 		error = ether_ioctl(ifp, cmd, data);
   3620 #ifdef WM_MPSAFE
   3621 		splx(s);
   3622 #endif
   3623 		if (error != ENETRESET)
   3624 			break;
   3625 
   3626 		error = 0;
   3627 
   3628 		if (cmd == SIOCSIFCAP)
   3629 			error = (*ifp->if_init)(ifp);
   3630 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3631 			;
   3632 		else if (ifp->if_flags & IFF_RUNNING) {
   3633 			/*
   3634 			 * Multicast list has changed; set the hardware filter
   3635 			 * accordingly.
   3636 			 */
   3637 			WM_CORE_LOCK(sc);
   3638 			wm_set_filter(sc);
   3639 			WM_CORE_UNLOCK(sc);
   3640 		}
   3641 		break;
   3642 	}
   3643 
   3644 #ifndef WM_MPSAFE
   3645 	splx(s);
   3646 #endif
   3647 	return error;
   3648 }
   3649 
   3650 /* MAC address related */
   3651 
   3652 /*
    3653  * Get the offset of the MAC address and return it.
    3654  * If an error occurs, return offset 0.
   3655  */
   3656 static uint16_t
   3657 wm_check_alt_mac_addr(struct wm_softc *sc)
   3658 {
   3659 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3660 	uint16_t offset = NVM_OFF_MACADDR;
   3661 
   3662 	/* Try to read alternative MAC address pointer */
   3663 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3664 		return 0;
   3665 
    3666 	/* Check whether the pointer is valid. */
   3667 	if ((offset == 0x0000) || (offset == 0xffff))
   3668 		return 0;
   3669 
   3670 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3671 	/*
    3672 	 * Check whether the alternative MAC address is valid or not.
    3673 	 * Some cards have a non-0xffff pointer but don't actually use
    3674 	 * an alternative MAC address.
    3675 	 *
    3676 	 * A valid unicast address must have the multicast (I/G) bit clear.
   3677 	 */
   3678 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
    3679 		if ((myea[0] & 0x01) == 0)
   3680 			return offset; /* Found */
   3681 
   3682 	/* Not found */
   3683 	return 0;
   3684 }
   3685 
   3686 static int
   3687 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3688 {
   3689 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3690 	uint16_t offset = NVM_OFF_MACADDR;
   3691 	int do_invert = 0;
   3692 
   3693 	switch (sc->sc_type) {
   3694 	case WM_T_82580:
   3695 	case WM_T_I350:
   3696 	case WM_T_I354:
   3697 		/* EEPROM Top Level Partitioning */
   3698 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3699 		break;
   3700 	case WM_T_82571:
   3701 	case WM_T_82575:
   3702 	case WM_T_82576:
   3703 	case WM_T_80003:
   3704 	case WM_T_I210:
   3705 	case WM_T_I211:
   3706 		offset = wm_check_alt_mac_addr(sc);
   3707 		if (offset == 0)
   3708 			if ((sc->sc_funcid & 0x01) == 1)
   3709 				do_invert = 1;
   3710 		break;
   3711 	default:
   3712 		if ((sc->sc_funcid & 0x01) == 1)
   3713 			do_invert = 1;
   3714 		break;
   3715 	}
   3716 
   3717 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3718 		goto bad;
   3719 
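         	/* NVM words are little-endian: the low byte comes first. */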
   3720 	enaddr[0] = myea[0] & 0xff;
   3721 	enaddr[1] = myea[0] >> 8;
   3722 	enaddr[2] = myea[1] & 0xff;
   3723 	enaddr[3] = myea[1] >> 8;
   3724 	enaddr[4] = myea[2] & 0xff;
   3725 	enaddr[5] = myea[2] >> 8;
   3726 
   3727 	/*
   3728 	 * Toggle the LSB of the MAC address on the second port
   3729 	 * of some dual port cards.
   3730 	 */
   3731 	if (do_invert != 0)
   3732 		enaddr[5] ^= 1;
   3733 
   3734 	return 0;
   3735 
   3736  bad:
   3737 	return -1;
   3738 }
   3739 
   3740 /*
   3741  * wm_set_ral:
   3742  *
    3743  *	Set an entry in the receive address list.
   3744  */
   3745 static void
   3746 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3747 {
   3748 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3749 	uint32_t wlock_mac;
   3750 	int rv;
   3751 
   3752 	if (enaddr != NULL) {
   3753 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3754 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3755 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
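         		/* Mark the receive-address entry as valid. */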
   3756 		ral_hi |= RAL_AV;
   3757 	} else {
   3758 		ral_lo = 0;
   3759 		ral_hi = 0;
   3760 	}
   3761 
   3762 	switch (sc->sc_type) {
   3763 	case WM_T_82542_2_0:
   3764 	case WM_T_82542_2_1:
   3765 	case WM_T_82543:
   3766 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3767 		CSR_WRITE_FLUSH(sc);
   3768 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3769 		CSR_WRITE_FLUSH(sc);
   3770 		break;
   3771 	case WM_T_PCH2:
   3772 	case WM_T_PCH_LPT:
   3773 	case WM_T_PCH_SPT:
   3774 	case WM_T_PCH_CNP:
   3775 		if (idx == 0) {
   3776 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3777 			CSR_WRITE_FLUSH(sc);
   3778 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3779 			CSR_WRITE_FLUSH(sc);
   3780 			return;
   3781 		}
   3782 		if (sc->sc_type != WM_T_PCH2) {
   3783 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3784 			    FWSM_WLOCK_MAC);
   3785 			addrl = WMREG_SHRAL(idx - 1);
   3786 			addrh = WMREG_SHRAH(idx - 1);
   3787 		} else {
   3788 			wlock_mac = 0;
   3789 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3790 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3791 		}
   3792 
   3793 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3794 			rv = wm_get_swflag_ich8lan(sc);
   3795 			if (rv != 0)
   3796 				return;
   3797 			CSR_WRITE(sc, addrl, ral_lo);
   3798 			CSR_WRITE_FLUSH(sc);
   3799 			CSR_WRITE(sc, addrh, ral_hi);
   3800 			CSR_WRITE_FLUSH(sc);
   3801 			wm_put_swflag_ich8lan(sc);
   3802 		}
   3803 
   3804 		break;
   3805 	default:
   3806 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3807 		CSR_WRITE_FLUSH(sc);
   3808 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3809 		CSR_WRITE_FLUSH(sc);
   3810 		break;
   3811 	}
   3812 }
   3813 
   3814 /*
   3815  * wm_mchash:
   3816  *
   3817  *	Compute the hash of the multicast address for the 4096-bit
   3818  *	multicast filter.
   3819  */
   3820 static uint32_t
   3821 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3822 {
   3823 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3824 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3825 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3826 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3827 	uint32_t hash;
   3828 
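         	/*
         	 * The hash is built from bits of the last two octets of the
         	 * address; sc_mchash_type selects one of four bit offsets.
         	 * ICH/PCH chips use a 10-bit hash, all others a 12-bit one.
         	 */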
   3829 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3830 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3831 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3832 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3833 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3834 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3835 		return (hash & 0x3ff);
   3836 	}
   3837 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3838 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3839 
   3840 	return (hash & 0xfff);
   3841 }
   3842 
    3843 /*
    3844  * wm_rar_count:
    3845  *	Return the number of receive address register entries.
    3846  */
   3847 static int
   3848 wm_rar_count(struct wm_softc *sc)
   3849 {
   3850 	int size;
   3851 
   3852 	switch (sc->sc_type) {
   3853 	case WM_T_ICH8:
    3854 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3855 		break;
   3856 	case WM_T_ICH9:
   3857 	case WM_T_ICH10:
   3858 	case WM_T_PCH:
   3859 		size = WM_RAL_TABSIZE_ICH8;
   3860 		break;
   3861 	case WM_T_PCH2:
   3862 		size = WM_RAL_TABSIZE_PCH2;
   3863 		break;
   3864 	case WM_T_PCH_LPT:
   3865 	case WM_T_PCH_SPT:
   3866 	case WM_T_PCH_CNP:
   3867 		size = WM_RAL_TABSIZE_PCH_LPT;
   3868 		break;
   3869 	case WM_T_82575:
   3870 	case WM_T_I210:
   3871 	case WM_T_I211:
   3872 		size = WM_RAL_TABSIZE_82575;
   3873 		break;
   3874 	case WM_T_82576:
   3875 	case WM_T_82580:
   3876 		size = WM_RAL_TABSIZE_82576;
   3877 		break;
   3878 	case WM_T_I350:
   3879 	case WM_T_I354:
   3880 		size = WM_RAL_TABSIZE_I350;
   3881 		break;
   3882 	default:
   3883 		size = WM_RAL_TABSIZE;
   3884 	}
   3885 
   3886 	return size;
   3887 }
   3888 
   3889 /*
   3890  * wm_set_filter:
   3891  *
   3892  *	Set up the receive filter.
   3893  */
   3894 static void
   3895 wm_set_filter(struct wm_softc *sc)
   3896 {
   3897 	struct ethercom *ec = &sc->sc_ethercom;
   3898 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3899 	struct ether_multi *enm;
   3900 	struct ether_multistep step;
   3901 	bus_addr_t mta_reg;
   3902 	uint32_t hash, reg, bit;
   3903 	int i, size, ralmax, rv;
   3904 
   3905 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3906 		device_xname(sc->sc_dev), __func__));
   3907 
   3908 	if (sc->sc_type >= WM_T_82544)
   3909 		mta_reg = WMREG_CORDOVA_MTA;
   3910 	else
   3911 		mta_reg = WMREG_MTA;
   3912 
   3913 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3914 
   3915 	if (ifp->if_flags & IFF_BROADCAST)
   3916 		sc->sc_rctl |= RCTL_BAM;
   3917 	if (ifp->if_flags & IFF_PROMISC) {
   3918 		sc->sc_rctl |= RCTL_UPE;
   3919 		ETHER_LOCK(ec);
   3920 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3921 		ETHER_UNLOCK(ec);
   3922 		goto allmulti;
   3923 	}
   3924 
   3925 	/*
   3926 	 * Set the station address in the first RAL slot, and
   3927 	 * clear the remaining slots.
   3928 	 */
   3929 	size = wm_rar_count(sc);
   3930 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3931 
   3932 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3933 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3934 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3935 		switch (i) {
   3936 		case 0:
   3937 			/* We can use all entries */
   3938 			ralmax = size;
   3939 			break;
   3940 		case 1:
   3941 			/* Only RAR[0] */
   3942 			ralmax = 1;
   3943 			break;
   3944 		default:
   3945 			/* Available SHRA + RAR[0] */
   3946 			ralmax = i + 1;
   3947 		}
   3948 	} else
   3949 		ralmax = size;
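         	/*
         	 * Entries at or above ralmax are locked by the management
         	 * firmware (see FWSM_WLOCK_MAC above) and are left untouched.
         	 */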
   3950 	for (i = 1; i < size; i++) {
   3951 		if (i < ralmax)
   3952 			wm_set_ral(sc, NULL, i);
   3953 	}
   3954 
   3955 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3956 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3957 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3958 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3959 		size = WM_ICH8_MC_TABSIZE;
   3960 	else
   3961 		size = WM_MC_TABSIZE;
   3962 	/* Clear out the multicast table. */
   3963 	for (i = 0; i < size; i++) {
   3964 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3965 		CSR_WRITE_FLUSH(sc);
   3966 	}
   3967 
   3968 	ETHER_LOCK(ec);
   3969 	ETHER_FIRST_MULTI(step, ec, enm);
   3970 	while (enm != NULL) {
   3971 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3972 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3973 			ETHER_UNLOCK(ec);
   3974 			/*
   3975 			 * We must listen to a range of multicast addresses.
   3976 			 * For now, just accept all multicasts, rather than
   3977 			 * trying to set only those filter bits needed to match
   3978 			 * the range.  (At this time, the only use of address
   3979 			 * ranges is for IP multicast routing, for which the
   3980 			 * range is big enough to require all bits set.)
   3981 			 */
   3982 			goto allmulti;
   3983 		}
   3984 
   3985 		hash = wm_mchash(sc, enm->enm_addrlo);
   3986 
   3987 		reg = (hash >> 5);
   3988 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3989 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3990 		    || (sc->sc_type == WM_T_PCH2)
   3991 		    || (sc->sc_type == WM_T_PCH_LPT)
   3992 		    || (sc->sc_type == WM_T_PCH_SPT)
   3993 		    || (sc->sc_type == WM_T_PCH_CNP))
   3994 			reg &= 0x1f;
   3995 		else
   3996 			reg &= 0x7f;
   3997 		bit = hash & 0x1f;
   3998 
   3999 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4000 		hash |= 1U << bit;
   4001 
   4002 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4003 			/*
    4004 			 * 82544 Errata 9: Certain registers cannot be written
   4005 			 * with particular alignments in PCI-X bus operation
   4006 			 * (FCAH, MTA and VFTA).
   4007 			 */
   4008 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4009 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4010 			CSR_WRITE_FLUSH(sc);
   4011 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4012 			CSR_WRITE_FLUSH(sc);
   4013 		} else {
   4014 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4015 			CSR_WRITE_FLUSH(sc);
   4016 		}
   4017 
   4018 		ETHER_NEXT_MULTI(step, enm);
   4019 	}
   4020 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4021 	ETHER_UNLOCK(ec);
   4022 
   4023 	goto setit;
   4024 
   4025  allmulti:
   4026 	sc->sc_rctl |= RCTL_MPE;
   4027 
   4028  setit:
   4029 	if (sc->sc_type >= WM_T_PCH2) {
   4030 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4031 		    && (ifp->if_mtu > ETHERMTU))
   4032 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4033 		else
   4034 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4035 		if (rv != 0)
   4036 			device_printf(sc->sc_dev,
   4037 			    "Failed to do workaround for jumbo frame.\n");
   4038 	}
   4039 
   4040 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4041 }
   4042 
   4043 /* Reset and init related */
   4044 
   4045 static void
   4046 wm_set_vlan(struct wm_softc *sc)
   4047 {
   4048 
   4049 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4050 		device_xname(sc->sc_dev), __func__));
   4051 
   4052 	/* Deal with VLAN enables. */
   4053 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4054 		sc->sc_ctrl |= CTRL_VME;
   4055 	else
   4056 		sc->sc_ctrl &= ~CTRL_VME;
   4057 
   4058 	/* Write the control registers. */
   4059 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4060 }
   4061 
   4062 static void
   4063 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4064 {
   4065 	uint32_t gcr;
   4066 	pcireg_t ctrl2;
   4067 
   4068 	gcr = CSR_READ(sc, WMREG_GCR);
   4069 
   4070 	/* Only take action if timeout value is defaulted to 0 */
   4071 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4072 		goto out;
   4073 
   4074 	if ((gcr & GCR_CAP_VER2) == 0) {
   4075 		gcr |= GCR_CMPL_TMOUT_10MS;
   4076 		goto out;
   4077 	}
   4078 
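         	/*
         	 * With PCIe capability version 2, set the timeout through the
         	 * standard Device Control 2 register instead of GCR.
         	 */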
   4079 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4080 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4081 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4082 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4083 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4084 
   4085 out:
   4086 	/* Disable completion timeout resend */
   4087 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4088 
   4089 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4090 }
   4091 
   4092 void
   4093 wm_get_auto_rd_done(struct wm_softc *sc)
   4094 {
   4095 	int i;
   4096 
    4097 	/* Wait for eeprom to reload */
   4098 	switch (sc->sc_type) {
   4099 	case WM_T_82571:
   4100 	case WM_T_82572:
   4101 	case WM_T_82573:
   4102 	case WM_T_82574:
   4103 	case WM_T_82583:
   4104 	case WM_T_82575:
   4105 	case WM_T_82576:
   4106 	case WM_T_82580:
   4107 	case WM_T_I350:
   4108 	case WM_T_I354:
   4109 	case WM_T_I210:
   4110 	case WM_T_I211:
   4111 	case WM_T_80003:
   4112 	case WM_T_ICH8:
   4113 	case WM_T_ICH9:
   4114 		for (i = 0; i < 10; i++) {
   4115 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4116 				break;
   4117 			delay(1000);
   4118 		}
   4119 		if (i == 10) {
   4120 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4121 			    "complete\n", device_xname(sc->sc_dev));
   4122 		}
   4123 		break;
   4124 	default:
   4125 		break;
   4126 	}
   4127 }
   4128 
   4129 void
   4130 wm_lan_init_done(struct wm_softc *sc)
   4131 {
   4132 	uint32_t reg = 0;
   4133 	int i;
   4134 
   4135 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4136 		device_xname(sc->sc_dev), __func__));
   4137 
   4138 	/* Wait for eeprom to reload */
   4139 	switch (sc->sc_type) {
   4140 	case WM_T_ICH10:
   4141 	case WM_T_PCH:
   4142 	case WM_T_PCH2:
   4143 	case WM_T_PCH_LPT:
   4144 	case WM_T_PCH_SPT:
   4145 	case WM_T_PCH_CNP:
   4146 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4147 			reg = CSR_READ(sc, WMREG_STATUS);
   4148 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4149 				break;
   4150 			delay(100);
   4151 		}
   4152 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4153 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4154 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4155 		}
   4156 		break;
   4157 	default:
   4158 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4159 		    __func__);
   4160 		break;
   4161 	}
   4162 
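         	/* Acknowledge the event by clearing the bit in STATUS. */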
   4163 	reg &= ~STATUS_LAN_INIT_DONE;
   4164 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4165 }
   4166 
   4167 void
   4168 wm_get_cfg_done(struct wm_softc *sc)
   4169 {
   4170 	int mask;
   4171 	uint32_t reg;
   4172 	int i;
   4173 
   4174 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4175 		device_xname(sc->sc_dev), __func__));
   4176 
   4177 	/* Wait for eeprom to reload */
   4178 	switch (sc->sc_type) {
   4179 	case WM_T_82542_2_0:
   4180 	case WM_T_82542_2_1:
   4181 		/* null */
   4182 		break;
   4183 	case WM_T_82543:
   4184 	case WM_T_82544:
   4185 	case WM_T_82540:
   4186 	case WM_T_82545:
   4187 	case WM_T_82545_3:
   4188 	case WM_T_82546:
   4189 	case WM_T_82546_3:
   4190 	case WM_T_82541:
   4191 	case WM_T_82541_2:
   4192 	case WM_T_82547:
   4193 	case WM_T_82547_2:
   4194 	case WM_T_82573:
   4195 	case WM_T_82574:
   4196 	case WM_T_82583:
   4197 		/* generic */
   4198 		delay(10*1000);
   4199 		break;
   4200 	case WM_T_80003:
   4201 	case WM_T_82571:
   4202 	case WM_T_82572:
   4203 	case WM_T_82575:
   4204 	case WM_T_82576:
   4205 	case WM_T_82580:
   4206 	case WM_T_I350:
   4207 	case WM_T_I354:
   4208 	case WM_T_I210:
   4209 	case WM_T_I211:
   4210 		if (sc->sc_type == WM_T_82571) {
    4211 			/* Only the 82571 shares port 0's CFGDONE bit */
   4212 			mask = EEMNGCTL_CFGDONE_0;
   4213 		} else
   4214 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4215 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4216 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4217 				break;
   4218 			delay(1000);
   4219 		}
   4220 		if (i >= WM_PHY_CFG_TIMEOUT)
   4221 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4222 				device_xname(sc->sc_dev), __func__));
   4223 		break;
   4224 	case WM_T_ICH8:
   4225 	case WM_T_ICH9:
   4226 	case WM_T_ICH10:
   4227 	case WM_T_PCH:
   4228 	case WM_T_PCH2:
   4229 	case WM_T_PCH_LPT:
   4230 	case WM_T_PCH_SPT:
   4231 	case WM_T_PCH_CNP:
   4232 		delay(10*1000);
   4233 		if (sc->sc_type >= WM_T_ICH10)
   4234 			wm_lan_init_done(sc);
   4235 		else
   4236 			wm_get_auto_rd_done(sc);
   4237 
   4238 		/* Clear PHY Reset Asserted bit */
   4239 		reg = CSR_READ(sc, WMREG_STATUS);
   4240 		if ((reg & STATUS_PHYRA) != 0)
   4241 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4242 		break;
   4243 	default:
   4244 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4245 		    __func__);
   4246 		break;
   4247 	}
   4248 }
   4249 
   4250 int
   4251 wm_phy_post_reset(struct wm_softc *sc)
   4252 {
   4253 	device_t dev = sc->sc_dev;
   4254 	uint16_t reg;
   4255 	int rv = 0;
   4256 
   4257 	/* This function is only for ICH8 and newer. */
   4258 	if (sc->sc_type < WM_T_ICH8)
   4259 		return 0;
   4260 
   4261 	if (wm_phy_resetisblocked(sc)) {
   4262 		/* XXX */
   4263 		device_printf(dev, "PHY is blocked\n");
   4264 		return -1;
   4265 	}
   4266 
   4267 	/* Allow time for h/w to get to quiescent state after reset */
   4268 	delay(10*1000);
   4269 
   4270 	/* Perform any necessary post-reset workarounds */
   4271 	if (sc->sc_type == WM_T_PCH)
   4272 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4273 	else if (sc->sc_type == WM_T_PCH2)
   4274 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4275 	if (rv != 0)
   4276 		return rv;
   4277 
   4278 	/* Clear the host wakeup bit after lcd reset */
   4279 	if (sc->sc_type >= WM_T_PCH) {
   4280 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4281 		reg &= ~BM_WUC_HOST_WU_BIT;
   4282 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4283 	}
   4284 
   4285 	/* Configure the LCD with the extended configuration region in NVM */
   4286 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4287 		return rv;
   4288 
   4289 	/* Configure the LCD with the OEM bits in NVM */
   4290 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4291 
   4292 	if (sc->sc_type == WM_T_PCH2) {
   4293 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4294 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4295 			delay(10 * 1000);
   4296 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4297 		}
   4298 		/* Set EEE LPI Update Timer to 200usec */
   4299 		rv = sc->phy.acquire(sc);
   4300 		if (rv)
   4301 			return rv;
   4302 		rv = wm_write_emi_reg_locked(dev,
   4303 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4304 		sc->phy.release(sc);
   4305 	}
   4306 
   4307 	return rv;
   4308 }
   4309 
   4310 /* Only for PCH and newer */
   4311 static int
   4312 wm_write_smbus_addr(struct wm_softc *sc)
   4313 {
   4314 	uint32_t strap, freq;
   4315 	uint16_t phy_data;
   4316 	int rv;
   4317 
   4318 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4319 		device_xname(sc->sc_dev), __func__));
   4320 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4321 
   4322 	strap = CSR_READ(sc, WMREG_STRAP);
   4323 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4324 
   4325 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4326 	if (rv != 0)
   4327 		return -1;
   4328 
   4329 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4330 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4331 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4332 
   4333 	if (sc->sc_phytype == WMPHY_I217) {
   4334 		/* Restore SMBus frequency */
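         		/*
         		 * A strap value of 0 means the frequency is unsupported;
         		 * otherwise (value - 1) encodes the low/high bits below.
         		 */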
    4335 		if (freq--) {
   4336 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4337 			    | HV_SMB_ADDR_FREQ_HIGH);
   4338 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4339 			    HV_SMB_ADDR_FREQ_LOW);
   4340 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4341 			    HV_SMB_ADDR_FREQ_HIGH);
   4342 		} else
   4343 			DPRINTF(sc, WM_DEBUG_INIT,
   4344 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4345 				device_xname(sc->sc_dev), __func__));
   4346 	}
   4347 
   4348 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4349 	    phy_data);
   4350 }
   4351 
   4352 static int
   4353 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4354 {
   4355 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4356 	uint16_t phy_page = 0;
   4357 	int rv = 0;
   4358 
   4359 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4360 		device_xname(sc->sc_dev), __func__));
   4361 
   4362 	switch (sc->sc_type) {
   4363 	case WM_T_ICH8:
   4364 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4365 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4366 			return 0;
   4367 
   4368 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4369 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4370 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4371 			break;
   4372 		}
   4373 		/* FALLTHROUGH */
   4374 	case WM_T_PCH:
   4375 	case WM_T_PCH2:
   4376 	case WM_T_PCH_LPT:
   4377 	case WM_T_PCH_SPT:
   4378 	case WM_T_PCH_CNP:
   4379 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4380 		break;
   4381 	default:
   4382 		return 0;
   4383 	}
   4384 
   4385 	if ((rv = sc->phy.acquire(sc)) != 0)
   4386 		return rv;
   4387 
   4388 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4389 	if ((reg & sw_cfg_mask) == 0)
   4390 		goto release;
   4391 
   4392 	/*
   4393 	 * Make sure HW does not configure LCD from PHY extended configuration
   4394 	 * before SW configuration
   4395 	 */
   4396 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4397 	if ((sc->sc_type < WM_T_PCH2)
   4398 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4399 		goto release;
   4400 
   4401 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4402 		device_xname(sc->sc_dev), __func__));
   4403 	/* word_addr is in DWORD */
   4404 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4405 
   4406 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4407 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4408 	if (cnf_size == 0)
   4409 		goto release;
   4410 
   4411 	if (((sc->sc_type == WM_T_PCH)
   4412 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4413 	    || (sc->sc_type > WM_T_PCH)) {
   4414 		/*
   4415 		 * HW configures the SMBus address and LEDs when the OEM and
   4416 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4417 		 * are cleared, SW will configure them instead.
   4418 		 */
   4419 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4420 			device_xname(sc->sc_dev), __func__));
   4421 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4422 			goto release;
   4423 
   4424 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4425 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4426 		    (uint16_t)reg);
   4427 		if (rv != 0)
   4428 			goto release;
   4429 	}
   4430 
   4431 	/* Configure LCD from extended configuration region. */
   4432 	for (i = 0; i < cnf_size; i++) {
   4433 		uint16_t reg_data, reg_addr;
   4434 
   4435 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4436 			goto release;
   4437 
    4438 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4439 			goto release;
   4440 
   4441 		if (reg_addr == IGPHY_PAGE_SELECT)
   4442 			phy_page = reg_data;
   4443 
   4444 		reg_addr &= IGPHY_MAXREGADDR;
   4445 		reg_addr |= phy_page;
   4446 
   4447 		KASSERT(sc->phy.writereg_locked != NULL);
   4448 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4449 		    reg_data);
   4450 	}
   4451 
   4452 release:
   4453 	sc->phy.release(sc);
   4454 	return rv;
   4455 }
   4456 
   4457 /*
   4458  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4459  *  @sc:       pointer to the HW structure
   4460  *  @d0_state: boolean if entering d0 or d3 device state
   4461  *
   4462  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4463  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4464  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4465  */
   4466 int
   4467 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4468 {
   4469 	uint32_t mac_reg;
   4470 	uint16_t oem_reg;
   4471 	int rv;
   4472 
   4473 	if (sc->sc_type < WM_T_PCH)
   4474 		return 0;
   4475 
   4476 	rv = sc->phy.acquire(sc);
   4477 	if (rv != 0)
   4478 		return rv;
   4479 
   4480 	if (sc->sc_type == WM_T_PCH) {
   4481 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4482 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4483 			goto release;
   4484 	}
   4485 
   4486 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4487 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4488 		goto release;
   4489 
   4490 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4491 
   4492 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4493 	if (rv != 0)
   4494 		goto release;
   4495 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4496 
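         	/*
         	 * Propagate the MAC's PHY_CTRL Gbe-disable and LPLU settings
         	 * for the target power state into the PHY's OEM bits.
         	 */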
   4497 	if (d0_state) {
   4498 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4499 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4500 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4501 			oem_reg |= HV_OEM_BITS_LPLU;
   4502 	} else {
   4503 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4504 		    != 0)
   4505 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4506 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4507 		    != 0)
   4508 			oem_reg |= HV_OEM_BITS_LPLU;
   4509 	}
   4510 
   4511 	/* Set Restart auto-neg to activate the bits */
   4512 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4513 	    && (wm_phy_resetisblocked(sc) == false))
   4514 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4515 
   4516 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4517 
   4518 release:
   4519 	sc->phy.release(sc);
   4520 
   4521 	return rv;
   4522 }
   4523 
   4524 /* Init hardware bits */
   4525 void
   4526 wm_initialize_hardware_bits(struct wm_softc *sc)
   4527 {
   4528 	uint32_t tarc0, tarc1, reg;
   4529 
   4530 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4531 		device_xname(sc->sc_dev), __func__));
   4532 
   4533 	/* For 82571 variant, 80003 and ICHs */
   4534 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4535 	    || (sc->sc_type >= WM_T_80003)) {
   4536 
   4537 		/* Transmit Descriptor Control 0 */
   4538 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4539 		reg |= TXDCTL_COUNT_DESC;
   4540 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4541 
   4542 		/* Transmit Descriptor Control 1 */
   4543 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4544 		reg |= TXDCTL_COUNT_DESC;
   4545 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4546 
   4547 		/* TARC0 */
   4548 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4549 		switch (sc->sc_type) {
   4550 		case WM_T_82571:
   4551 		case WM_T_82572:
   4552 		case WM_T_82573:
   4553 		case WM_T_82574:
   4554 		case WM_T_82583:
   4555 		case WM_T_80003:
   4556 			/* Clear bits 30..27 */
   4557 			tarc0 &= ~__BITS(30, 27);
   4558 			break;
   4559 		default:
   4560 			break;
   4561 		}
   4562 
   4563 		switch (sc->sc_type) {
   4564 		case WM_T_82571:
   4565 		case WM_T_82572:
   4566 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4567 
   4568 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4569 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4570 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4571 			/* 8257[12] Errata No.7 */
    4572 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4573 
   4574 			/* TARC1 bit 28 */
   4575 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4576 				tarc1 &= ~__BIT(28);
   4577 			else
   4578 				tarc1 |= __BIT(28);
   4579 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4580 
   4581 			/*
   4582 			 * 8257[12] Errata No.13
    4583 			 * Disable Dynamic Clock Gating.
   4584 			 */
   4585 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4586 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4587 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4588 			break;
   4589 		case WM_T_82573:
   4590 		case WM_T_82574:
   4591 		case WM_T_82583:
   4592 			if ((sc->sc_type == WM_T_82574)
   4593 			    || (sc->sc_type == WM_T_82583))
   4594 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4595 
   4596 			/* Extended Device Control */
   4597 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4598 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4599 			reg |= __BIT(22);	/* Set bit 22 */
   4600 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4601 
   4602 			/* Device Control */
   4603 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4604 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4605 
   4606 			/* PCIe Control Register */
   4607 			/*
   4608 			 * 82573 Errata (unknown).
   4609 			 *
   4610 			 * 82574 Errata 25 and 82583 Errata 12
   4611 			 * "Dropped Rx Packets":
    4612 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4613 			 */
   4614 			reg = CSR_READ(sc, WMREG_GCR);
   4615 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4616 			CSR_WRITE(sc, WMREG_GCR, reg);
   4617 
   4618 			if ((sc->sc_type == WM_T_82574)
   4619 			    || (sc->sc_type == WM_T_82583)) {
   4620 				/*
   4621 				 * Document says this bit must be set for
   4622 				 * proper operation.
   4623 				 */
   4624 				reg = CSR_READ(sc, WMREG_GCR);
   4625 				reg |= __BIT(22);
   4626 				CSR_WRITE(sc, WMREG_GCR, reg);
   4627 
   4628 				/*
    4629 				 * Apply a workaround for a hardware erratum
    4630 				 * documented in the errata docs. It fixes an
    4631 				 * issue where some error-prone or unreliable
    4632 				 * PCIe completions occur, particularly with
    4633 				 * ASPM enabled. Without the fix, the issue
    4634 				 * can cause Tx timeouts.
   4635 				 */
   4636 				reg = CSR_READ(sc, WMREG_GCR2);
   4637 				reg |= __BIT(0);
   4638 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4639 			}
   4640 			break;
   4641 		case WM_T_80003:
   4642 			/* TARC0 */
   4643 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4644 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4645 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4646 
   4647 			/* TARC1 bit 28 */
   4648 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4649 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4650 				tarc1 &= ~__BIT(28);
   4651 			else
   4652 				tarc1 |= __BIT(28);
   4653 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4654 			break;
   4655 		case WM_T_ICH8:
   4656 		case WM_T_ICH9:
   4657 		case WM_T_ICH10:
   4658 		case WM_T_PCH:
   4659 		case WM_T_PCH2:
   4660 		case WM_T_PCH_LPT:
   4661 		case WM_T_PCH_SPT:
   4662 		case WM_T_PCH_CNP:
   4663 			/* TARC0 */
   4664 			if (sc->sc_type == WM_T_ICH8) {
   4665 				/* Set TARC0 bits 29 and 28 */
   4666 				tarc0 |= __BITS(29, 28);
   4667 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4668 				tarc0 |= __BIT(29);
   4669 				/*
   4670 				 *  Drop bit 28. From Linux.
   4671 				 * See I218/I219 spec update
   4672 				 * "5. Buffer Overrun While the I219 is
   4673 				 * Processing DMA Transactions"
   4674 				 */
   4675 				tarc0 &= ~__BIT(28);
   4676 			}
   4677 			/* Set TARC0 bits 23,24,26,27 */
   4678 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4679 
   4680 			/* CTRL_EXT */
   4681 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4682 			reg |= __BIT(22);	/* Set bit 22 */
   4683 			/*
   4684 			 * Enable PHY low-power state when MAC is at D3
   4685 			 * w/o WoL
   4686 			 */
   4687 			if (sc->sc_type >= WM_T_PCH)
   4688 				reg |= CTRL_EXT_PHYPDEN;
   4689 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4690 
   4691 			/* TARC1 */
   4692 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4693 			/* bit 28 */
   4694 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4695 				tarc1 &= ~__BIT(28);
   4696 			else
   4697 				tarc1 |= __BIT(28);
   4698 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4699 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4700 
   4701 			/* Device Status */
   4702 			if (sc->sc_type == WM_T_ICH8) {
   4703 				reg = CSR_READ(sc, WMREG_STATUS);
   4704 				reg &= ~__BIT(31);
   4705 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4706 
   4707 			}
   4708 
   4709 			/* IOSFPC */
   4710 			if (sc->sc_type == WM_T_PCH_SPT) {
   4711 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4712 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4713 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4714 			}
   4715 			/*
    4716 			 * To work around a descriptor data corruption issue
    4717 			 * during NFS v2 UDP traffic, just disable the NFS
    4718 			 * filtering capability.
   4719 			 */
   4720 			reg = CSR_READ(sc, WMREG_RFCTL);
   4721 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4722 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4723 			break;
   4724 		default:
   4725 			break;
   4726 		}
   4727 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4728 
   4729 		switch (sc->sc_type) {
   4730 		/*
   4731 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4732 		 * Avoid RSS Hash Value bug.
   4733 		 */
   4734 		case WM_T_82571:
   4735 		case WM_T_82572:
   4736 		case WM_T_82573:
   4737 		case WM_T_80003:
   4738 		case WM_T_ICH8:
   4739 			reg = CSR_READ(sc, WMREG_RFCTL);
    4740 			reg |= WMREG_RFCTL_NEWIPV6EXDIS
         			    | WMREG_RFCTL_IPV6EXDIS;
   4741 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4742 			break;
   4743 		case WM_T_82574:
    4744 			/* Use the extended Rx descriptor. */
   4745 			reg = CSR_READ(sc, WMREG_RFCTL);
   4746 			reg |= WMREG_RFCTL_EXSTEN;
   4747 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4748 			break;
   4749 		default:
   4750 			break;
   4751 		}
   4752 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4753 		/*
   4754 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4755 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4756 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4757 		 * Correctly by the Device"
   4758 		 *
   4759 		 * I354(C2000) Errata AVR53:
   4760 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4761 		 * Hang"
   4762 		 */
   4763 		reg = CSR_READ(sc, WMREG_RFCTL);
   4764 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4765 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4766 	}
   4767 }
   4768 
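         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Map the raw RXPBS size field through the 82580 lookup table;
          *	out-of-range values map to 0.
          */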
   4769 static uint32_t
   4770 wm_rxpbs_adjust_82580(uint32_t val)
   4771 {
   4772 	uint32_t rv = 0;
   4773 
   4774 	if (val < __arraycount(wm_82580_rxpbs_table))
   4775 		rv = wm_82580_rxpbs_table[val];
   4776 
   4777 	return rv;
   4778 }
   4779 
   4780 /*
   4781  * wm_reset_phy:
   4782  *
   4783  *	generic PHY reset function.
   4784  *	Same as e1000_phy_hw_reset_generic()
   4785  */
   4786 static int
   4787 wm_reset_phy(struct wm_softc *sc)
   4788 {
   4789 	uint32_t reg;
   4790 
   4791 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4792 		device_xname(sc->sc_dev), __func__));
   4793 	if (wm_phy_resetisblocked(sc))
   4794 		return -1;
   4795 
   4796 	sc->phy.acquire(sc);
   4797 
   4798 	reg = CSR_READ(sc, WMREG_CTRL);
   4799 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4800 	CSR_WRITE_FLUSH(sc);
   4801 
   4802 	delay(sc->phy.reset_delay_us);
   4803 
   4804 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4805 	CSR_WRITE_FLUSH(sc);
   4806 
   4807 	delay(150);
   4808 
   4809 	sc->phy.release(sc);
   4810 
   4811 	wm_get_cfg_done(sc);
   4812 	wm_phy_post_reset(sc);
   4813 
   4814 	return 0;
   4815 }
   4816 
   4817 /*
   4818  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4819  * so it is enough to check sc->sc_queue[0] only.
   4820  */
   4821 static void
   4822 wm_flush_desc_rings(struct wm_softc *sc)
   4823 {
   4824 	pcireg_t preg;
   4825 	uint32_t reg;
   4826 	struct wm_txqueue *txq;
   4827 	wiseman_txdesc_t *txd;
   4828 	int nexttx;
   4829 	uint32_t rctl;
   4830 
   4831 	/* First, disable MULR fix in FEXTNVM11 */
   4832 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4833 	reg |= FEXTNVM11_DIS_MULRFIX;
   4834 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4835 
   4836 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4837 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4838 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4839 		return;
   4840 
   4841 	/* TX */
   4842 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4843 	    preg, reg);
   4844 	reg = CSR_READ(sc, WMREG_TCTL);
   4845 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4846 
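         	/*
         	 * Post a single dummy descriptor; fetching it forces the
         	 * hardware to drain the stuck Tx descriptor ring.
         	 */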
   4847 	txq = &sc->sc_queue[0].wmq_txq;
   4848 	nexttx = txq->txq_next;
   4849 	txd = &txq->txq_descs[nexttx];
   4850 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4851 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4852 	txd->wtx_fields.wtxu_status = 0;
   4853 	txd->wtx_fields.wtxu_options = 0;
   4854 	txd->wtx_fields.wtxu_vlan = 0;
   4855 
   4856 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4857 	    BUS_SPACE_BARRIER_WRITE);
   4858 
   4859 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4860 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4861 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4862 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4863 	delay(250);
   4864 
   4865 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4866 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4867 		return;
   4868 
   4869 	/* RX */
   4870 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4871 	rctl = CSR_READ(sc, WMREG_RCTL);
   4872 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4873 	CSR_WRITE_FLUSH(sc);
   4874 	delay(150);
   4875 
   4876 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4877 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4878 	reg &= 0xffffc000;
   4879 	/*
   4880 	 * Update thresholds: prefetch threshold to 31, host threshold
   4881 	 * to 1 and make sure the granularity is "descriptors" and not
   4882 	 * "cache lines"
   4883 	 */
   4884 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4885 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4886 
   4887 	/* Momentarily enable the RX ring for the changes to take effect */
   4888 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4889 	CSR_WRITE_FLUSH(sc);
   4890 	delay(150);
   4891 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4892 }
   4893 
   4894 /*
   4895  * wm_reset:
   4896  *
   4897  *	Reset the i82542 chip.
   4898  */
   4899 static void
   4900 wm_reset(struct wm_softc *sc)
   4901 {
   4902 	int phy_reset = 0;
   4903 	int i, error = 0;
   4904 	uint32_t reg;
   4905 	uint16_t kmreg;
   4906 	int rv;
   4907 
   4908 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4909 		device_xname(sc->sc_dev), __func__));
   4910 	KASSERT(sc->sc_type != 0);
   4911 
   4912 	/*
   4913 	 * Allocate on-chip memory according to the MTU size.
   4914 	 * The Packet Buffer Allocation register must be written
   4915 	 * before the chip is reset.
   4916 	 */
   4917 	switch (sc->sc_type) {
   4918 	case WM_T_82547:
   4919 	case WM_T_82547_2:
   4920 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4921 		    PBA_22K : PBA_30K;
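         		/*
         		 * The Rx allocation (sc_pba) and the Tx FIFO share a
         		 * 40KB packet buffer; whatever Rx does not take becomes
         		 * the Tx FIFO (PBA_40K - sc_pba below).
         		 */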
   4922 		for (i = 0; i < sc->sc_nqueues; i++) {
   4923 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4924 			txq->txq_fifo_head = 0;
   4925 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4926 			txq->txq_fifo_size =
   4927 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4928 			txq->txq_fifo_stall = 0;
   4929 		}
   4930 		break;
   4931 	case WM_T_82571:
   4932 	case WM_T_82572:
    4933 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4934 	case WM_T_80003:
   4935 		sc->sc_pba = PBA_32K;
   4936 		break;
   4937 	case WM_T_82573:
   4938 		sc->sc_pba = PBA_12K;
   4939 		break;
   4940 	case WM_T_82574:
   4941 	case WM_T_82583:
   4942 		sc->sc_pba = PBA_20K;
   4943 		break;
   4944 	case WM_T_82576:
   4945 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4946 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4947 		break;
   4948 	case WM_T_82580:
   4949 	case WM_T_I350:
   4950 	case WM_T_I354:
   4951 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4952 		break;
   4953 	case WM_T_I210:
   4954 	case WM_T_I211:
   4955 		sc->sc_pba = PBA_34K;
   4956 		break;
   4957 	case WM_T_ICH8:
   4958 		/* Workaround for a bit corruption issue in FIFO memory */
   4959 		sc->sc_pba = PBA_8K;
   4960 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4961 		break;
   4962 	case WM_T_ICH9:
   4963 	case WM_T_ICH10:
   4964 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4965 		    PBA_14K : PBA_10K;
   4966 		break;
   4967 	case WM_T_PCH:
   4968 	case WM_T_PCH2:	/* XXX 14K? */
   4969 	case WM_T_PCH_LPT:
   4970 	case WM_T_PCH_SPT:
   4971 	case WM_T_PCH_CNP:
   4972 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   4973 		    PBA_12K : PBA_26K;
   4974 		break;
   4975 	default:
   4976 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4977 		    PBA_40K : PBA_48K;
   4978 		break;
   4979 	}
   4980 	/*
    4981 	 * Only old or non-multiqueue devices have the PBA register.
   4982 	 * XXX Need special handling for 82575.
   4983 	 */
   4984 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4985 	    || (sc->sc_type == WM_T_82575))
   4986 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4987 
   4988 	/* Prevent the PCI-E bus from sticking */
   4989 	if (sc->sc_flags & WM_F_PCIE) {
   4990 		int timeout = 800;
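         		/* Up to 800 polls at 100us each: wait at most 80ms. */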
   4991 
   4992 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4993 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4994 
   4995 		while (timeout--) {
   4996 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4997 			    == 0)
   4998 				break;
   4999 			delay(100);
   5000 		}
   5001 		if (timeout == 0)
   5002 			device_printf(sc->sc_dev,
   5003 			    "failed to disable busmastering\n");
   5004 	}
   5005 
   5006 	/* Set the completion timeout for interface */
   5007 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5008 	    || (sc->sc_type == WM_T_82580)
   5009 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5010 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5011 		wm_set_pcie_completion_timeout(sc);
   5012 
   5013 	/* Clear interrupt */
   5014 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5015 	if (wm_is_using_msix(sc)) {
   5016 		if (sc->sc_type != WM_T_82574) {
   5017 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5018 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5019 		} else
   5020 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5021 	}
   5022 
   5023 	/* Stop the transmit and receive processes. */
   5024 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5025 	sc->sc_rctl &= ~RCTL_EN;
   5026 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5027 	CSR_WRITE_FLUSH(sc);
   5028 
   5029 	/* XXX set_tbi_sbp_82543() */
   5030 
   5031 	delay(10*1000);
   5032 
   5033 	/* Must acquire the MDIO ownership before MAC reset */
   5034 	switch (sc->sc_type) {
   5035 	case WM_T_82573:
   5036 	case WM_T_82574:
   5037 	case WM_T_82583:
   5038 		error = wm_get_hw_semaphore_82573(sc);
   5039 		break;
   5040 	default:
   5041 		break;
   5042 	}
   5043 
   5044 	/*
   5045 	 * 82541 Errata 29? & 82547 Errata 28?
   5046 	 * See also the description about PHY_RST bit in CTRL register
   5047 	 * in 8254x_GBe_SDM.pdf.
   5048 	 */
   5049 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5050 		CSR_WRITE(sc, WMREG_CTRL,
   5051 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5052 		CSR_WRITE_FLUSH(sc);
   5053 		delay(5000);
   5054 	}
   5055 
   5056 	switch (sc->sc_type) {
   5057 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5058 	case WM_T_82541:
   5059 	case WM_T_82541_2:
   5060 	case WM_T_82547:
   5061 	case WM_T_82547_2:
   5062 		/*
   5063 		 * On some chipsets, a reset through a memory-mapped write
   5064 		 * cycle can cause the chip to reset before completing the
    5065 		 * write cycle. This causes major headaches that can be avoided
   5066 		 * by issuing the reset via indirect register writes through
   5067 		 * I/O space.
   5068 		 *
   5069 		 * So, if we successfully mapped the I/O BAR at attach time,
   5070 		 * use that. Otherwise, try our luck with a memory-mapped
   5071 		 * reset.
   5072 		 */
   5073 		if (sc->sc_flags & WM_F_IOH_VALID)
   5074 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5075 		else
   5076 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5077 		break;
   5078 	case WM_T_82545_3:
   5079 	case WM_T_82546_3:
   5080 		/* Use the shadow control register on these chips. */
   5081 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5082 		break;
   5083 	case WM_T_80003:
   5084 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5085 		sc->phy.acquire(sc);
   5086 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5087 		sc->phy.release(sc);
   5088 		break;
   5089 	case WM_T_ICH8:
   5090 	case WM_T_ICH9:
   5091 	case WM_T_ICH10:
   5092 	case WM_T_PCH:
   5093 	case WM_T_PCH2:
   5094 	case WM_T_PCH_LPT:
   5095 	case WM_T_PCH_SPT:
   5096 	case WM_T_PCH_CNP:
   5097 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5098 		if (wm_phy_resetisblocked(sc) == false) {
   5099 			/*
   5100 			 * Gate automatic PHY configuration by hardware on
   5101 			 * non-managed 82579
   5102 			 */
   5103 			if ((sc->sc_type == WM_T_PCH2)
   5104 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5105 				== 0))
   5106 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5107 
   5108 			reg |= CTRL_PHY_RESET;
   5109 			phy_reset = 1;
   5110 		} else
   5111 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5112 		sc->phy.acquire(sc);
   5113 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5114 		/* Don't insert a completion barrier during reset */
   5115 		delay(20*1000);
   5116 		mutex_exit(sc->sc_ich_phymtx);
   5117 		break;
   5118 	case WM_T_82580:
   5119 	case WM_T_I350:
   5120 	case WM_T_I354:
   5121 	case WM_T_I210:
   5122 	case WM_T_I211:
   5123 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5124 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5125 			CSR_WRITE_FLUSH(sc);
   5126 		delay(5000);
   5127 		break;
   5128 	case WM_T_82542_2_0:
   5129 	case WM_T_82542_2_1:
   5130 	case WM_T_82543:
   5131 	case WM_T_82540:
   5132 	case WM_T_82545:
   5133 	case WM_T_82546:
   5134 	case WM_T_82571:
   5135 	case WM_T_82572:
   5136 	case WM_T_82573:
   5137 	case WM_T_82574:
   5138 	case WM_T_82575:
   5139 	case WM_T_82576:
   5140 	case WM_T_82583:
   5141 	default:
   5142 		/* Everything else can safely use the documented method. */
   5143 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5144 		break;
   5145 	}
   5146 
   5147 	/* Must release the MDIO ownership after MAC reset */
   5148 	switch (sc->sc_type) {
   5149 	case WM_T_82573:
   5150 	case WM_T_82574:
   5151 	case WM_T_82583:
   5152 		if (error == 0)
   5153 			wm_put_hw_semaphore_82573(sc);
   5154 		break;
   5155 	default:
   5156 		break;
   5157 	}
   5158 
   5159 	/* Set Phy Config Counter to 50msec */
   5160 	if (sc->sc_type == WM_T_PCH2) {
   5161 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5162 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5163 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5164 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5165 	}
   5166 
   5167 	if (phy_reset != 0)
   5168 		wm_get_cfg_done(sc);
   5169 
   5170 	/* Reload EEPROM */
   5171 	switch (sc->sc_type) {
   5172 	case WM_T_82542_2_0:
   5173 	case WM_T_82542_2_1:
   5174 	case WM_T_82543:
   5175 	case WM_T_82544:
   5176 		delay(10);
   5177 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5178 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5179 		CSR_WRITE_FLUSH(sc);
   5180 		delay(2000);
   5181 		break;
   5182 	case WM_T_82540:
   5183 	case WM_T_82545:
   5184 	case WM_T_82545_3:
   5185 	case WM_T_82546:
   5186 	case WM_T_82546_3:
   5187 		delay(5*1000);
   5188 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5189 		break;
   5190 	case WM_T_82541:
   5191 	case WM_T_82541_2:
   5192 	case WM_T_82547:
   5193 	case WM_T_82547_2:
   5194 		delay(20000);
   5195 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5196 		break;
   5197 	case WM_T_82571:
   5198 	case WM_T_82572:
   5199 	case WM_T_82573:
   5200 	case WM_T_82574:
   5201 	case WM_T_82583:
   5202 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5203 			delay(10);
   5204 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5205 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5206 			CSR_WRITE_FLUSH(sc);
   5207 		}
   5208 		/* check EECD_EE_AUTORD */
   5209 		wm_get_auto_rd_done(sc);
   5210 		/*
    5211 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   5212 		 * is set.
   5213 		 */
   5214 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5215 		    || (sc->sc_type == WM_T_82583))
   5216 			delay(25*1000);
   5217 		break;
   5218 	case WM_T_82575:
   5219 	case WM_T_82576:
   5220 	case WM_T_82580:
   5221 	case WM_T_I350:
   5222 	case WM_T_I354:
   5223 	case WM_T_I210:
   5224 	case WM_T_I211:
   5225 	case WM_T_80003:
   5226 		/* check EECD_EE_AUTORD */
   5227 		wm_get_auto_rd_done(sc);
   5228 		break;
   5229 	case WM_T_ICH8:
   5230 	case WM_T_ICH9:
   5231 	case WM_T_ICH10:
   5232 	case WM_T_PCH:
   5233 	case WM_T_PCH2:
   5234 	case WM_T_PCH_LPT:
   5235 	case WM_T_PCH_SPT:
   5236 	case WM_T_PCH_CNP:
   5237 		break;
   5238 	default:
   5239 		panic("%s: unknown type\n", __func__);
   5240 	}
   5241 
   5242 	/* Check whether EEPROM is present or not */
   5243 	switch (sc->sc_type) {
   5244 	case WM_T_82575:
   5245 	case WM_T_82576:
   5246 	case WM_T_82580:
   5247 	case WM_T_I350:
   5248 	case WM_T_I354:
   5249 	case WM_T_ICH8:
   5250 	case WM_T_ICH9:
   5251 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5252 			/* Not found */
   5253 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5254 			if (sc->sc_type == WM_T_82575)
   5255 				wm_reset_init_script_82575(sc);
   5256 		}
   5257 		break;
   5258 	default:
   5259 		break;
   5260 	}
   5261 
   5262 	if (phy_reset != 0)
   5263 		wm_phy_post_reset(sc);
   5264 
   5265 	if ((sc->sc_type == WM_T_82580)
   5266 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5267 		/* Clear global device reset status bit */
   5268 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5269 	}
   5270 
   5271 	/* Clear any pending interrupt events. */
   5272 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5273 	reg = CSR_READ(sc, WMREG_ICR);
   5274 	if (wm_is_using_msix(sc)) {
   5275 		if (sc->sc_type != WM_T_82574) {
   5276 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5277 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5278 		} else
   5279 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5280 	}
   5281 
   5282 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5283 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5284 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5285 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5286 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5287 		reg |= KABGTXD_BGSQLBIAS;
   5288 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5289 	}
   5290 
   5291 	/* Reload sc_ctrl */
   5292 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5293 
   5294 	wm_set_eee(sc);
   5295 
   5296 	/*
   5297 	 * For PCH, this write will make sure that any noise will be detected
   5298 	 * as a CRC error and be dropped rather than show up as a bad packet
    5299 	 * to the DMA engine.
   5300 	 */
   5301 	if (sc->sc_type == WM_T_PCH)
   5302 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5303 
   5304 	if (sc->sc_type >= WM_T_82544)
   5305 		CSR_WRITE(sc, WMREG_WUC, 0);
   5306 
   5307 	if (sc->sc_type < WM_T_82575)
   5308 		wm_disable_aspm(sc); /* Workaround for some chips */
   5309 
   5310 	wm_reset_mdicnfg_82580(sc);
   5311 
   5312 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5313 		wm_pll_workaround_i210(sc);
   5314 
   5315 	if (sc->sc_type == WM_T_80003) {
   5316 		/* Default to TRUE to enable the MDIC W/A */
   5317 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5318 
   5319 		rv = wm_kmrn_readreg(sc,
   5320 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5321 		if (rv == 0) {
   5322 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5323 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5324 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5325 			else
   5326 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5327 		}
   5328 	}
   5329 }
   5330 
   5331 /*
   5332  * wm_add_rxbuf:
   5333  *
    5334  *	Add a receive buffer to the indicated descriptor.
   5335  */
   5336 static int
   5337 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5338 {
   5339 	struct wm_softc *sc = rxq->rxq_sc;
   5340 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5341 	struct mbuf *m;
   5342 	int error;
   5343 
   5344 	KASSERT(mutex_owned(rxq->rxq_lock));
   5345 
   5346 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5347 	if (m == NULL)
   5348 		return ENOBUFS;
   5349 
   5350 	MCLGET(m, M_DONTWAIT);
   5351 	if ((m->m_flags & M_EXT) == 0) {
   5352 		m_freem(m);
   5353 		return ENOBUFS;
   5354 	}
   5355 
   5356 	if (rxs->rxs_mbuf != NULL)
   5357 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5358 
   5359 	rxs->rxs_mbuf = m;
   5360 
   5361 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5362 	/*
   5363 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5364 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5365 	 */
   5366 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5367 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5368 	if (error) {
   5369 		/* XXX XXX XXX */
   5370 		aprint_error_dev(sc->sc_dev,
   5371 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5372 		panic("wm_add_rxbuf");
   5373 	}
   5374 
   5375 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5376 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5377 
   5378 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5379 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5380 			wm_init_rxdesc(rxq, idx);
   5381 	} else
   5382 		wm_init_rxdesc(rxq, idx);
   5383 
   5384 	return 0;
   5385 }
   5386 
   5387 /*
   5388  * wm_rxdrain:
   5389  *
   5390  *	Drain the receive queue.
   5391  */
   5392 static void
   5393 wm_rxdrain(struct wm_rxqueue *rxq)
   5394 {
   5395 	struct wm_softc *sc = rxq->rxq_sc;
   5396 	struct wm_rxsoft *rxs;
   5397 	int i;
   5398 
   5399 	KASSERT(mutex_owned(rxq->rxq_lock));
   5400 
   5401 	for (i = 0; i < WM_NRXDESC; i++) {
   5402 		rxs = &rxq->rxq_soft[i];
   5403 		if (rxs->rxs_mbuf != NULL) {
   5404 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5405 			m_freem(rxs->rxs_mbuf);
   5406 			rxs->rxs_mbuf = NULL;
   5407 		}
   5408 	}
   5409 }
   5410 
   5411 /*
   5412  * Setup registers for RSS.
   5413  *
   5414  * XXX not yet VMDq support
   5415  */
   5416 static void
   5417 wm_init_rss(struct wm_softc *sc)
   5418 {
   5419 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5420 	int i;
   5421 
   5422 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5423 
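         	/*
         	 * Fill the redirection table round-robin: RETA entry i
         	 * steers packets to queue (i % sc_nqueues).
         	 */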
   5424 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5425 		unsigned int qid, reta_ent;
   5426 
   5427 		qid  = i % sc->sc_nqueues;
   5428 		switch (sc->sc_type) {
   5429 		case WM_T_82574:
   5430 			reta_ent = __SHIFTIN(qid,
   5431 			    RETA_ENT_QINDEX_MASK_82574);
   5432 			break;
   5433 		case WM_T_82575:
   5434 			reta_ent = __SHIFTIN(qid,
   5435 			    RETA_ENT_QINDEX1_MASK_82575);
   5436 			break;
   5437 		default:
   5438 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5439 			break;
   5440 		}
   5441 
   5442 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5443 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5444 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5445 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5446 	}
   5447 
   5448 	rss_getkey((uint8_t *)rss_key);
   5449 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5450 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5451 
   5452 	if (sc->sc_type == WM_T_82574)
   5453 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5454 	else
   5455 		mrqc = MRQC_ENABLE_RSS_MQ;
   5456 
   5457 	/*
   5458 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5459 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5460 	 */
   5461 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5462 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5463 #if 0
   5464 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5465 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5466 #endif
   5467 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5468 
   5469 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5470 }
   5471 
   5472 /*
    5473  * Adjust TX and RX queue numbers which the system actually uses.
    5474  *
    5475  * The numbers are affected by the parameters below:
    5476  *     - The number of hardware queues
   5477  *     - The number of MSI-X vectors (= "nvectors" argument)
   5478  *     - ncpu
   5479  */
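         /*
          * For example, an 82576 (16 hardware queues) given 5 MSI-X vectors
          * on a 4-CPU machine ends up with min(16, 5 - 1, 4) = 4 queues.
          */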
   5480 static void
   5481 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5482 {
   5483 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5484 
   5485 	if (nvectors < 2) {
   5486 		sc->sc_nqueues = 1;
   5487 		return;
   5488 	}
   5489 
   5490 	switch (sc->sc_type) {
   5491 	case WM_T_82572:
   5492 		hw_ntxqueues = 2;
   5493 		hw_nrxqueues = 2;
   5494 		break;
   5495 	case WM_T_82574:
   5496 		hw_ntxqueues = 2;
   5497 		hw_nrxqueues = 2;
   5498 		break;
   5499 	case WM_T_82575:
   5500 		hw_ntxqueues = 4;
   5501 		hw_nrxqueues = 4;
   5502 		break;
   5503 	case WM_T_82576:
   5504 		hw_ntxqueues = 16;
   5505 		hw_nrxqueues = 16;
   5506 		break;
   5507 	case WM_T_82580:
   5508 	case WM_T_I350:
   5509 	case WM_T_I354:
   5510 		hw_ntxqueues = 8;
   5511 		hw_nrxqueues = 8;
   5512 		break;
   5513 	case WM_T_I210:
   5514 		hw_ntxqueues = 4;
   5515 		hw_nrxqueues = 4;
   5516 		break;
   5517 	case WM_T_I211:
   5518 		hw_ntxqueues = 2;
   5519 		hw_nrxqueues = 2;
   5520 		break;
   5521 		/*
    5522 		 * The Ethernet controllers below do not support MSI-X,
    5523 		 * so this driver does not use multiqueue on them.
   5524 		 *     - WM_T_80003
   5525 		 *     - WM_T_ICH8
   5526 		 *     - WM_T_ICH9
   5527 		 *     - WM_T_ICH10
   5528 		 *     - WM_T_PCH
   5529 		 *     - WM_T_PCH2
   5530 		 *     - WM_T_PCH_LPT
   5531 		 */
   5532 	default:
   5533 		hw_ntxqueues = 1;
   5534 		hw_nrxqueues = 1;
   5535 		break;
   5536 	}
   5537 
   5538 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5539 
   5540 	/*
    5541 	 * Since using more queues than MSI-X vectors cannot improve scaling,
    5542 	 * we limit the number of queues actually used.
   5543 	 */
   5544 	if (nvectors < hw_nqueues + 1)
   5545 		sc->sc_nqueues = nvectors - 1;
   5546 	else
   5547 		sc->sc_nqueues = hw_nqueues;
   5548 
   5549 	/*
    5550 	 * Since using more queues than CPUs cannot improve scaling, we limit
    5551 	 * the number of queues actually used.
   5552 	 */
   5553 	if (ncpu < sc->sc_nqueues)
   5554 		sc->sc_nqueues = ncpu;
   5555 }
   5556 
   5557 static inline bool
   5558 wm_is_using_msix(struct wm_softc *sc)
   5559 {
   5560 
   5561 	return (sc->sc_nintrs > 1);
   5562 }
   5563 
   5564 static inline bool
   5565 wm_is_using_multiqueue(struct wm_softc *sc)
   5566 {
   5567 
   5568 	return (sc->sc_nqueues > 1);
   5569 }
   5570 
   5571 static int
   5572 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5573 {
   5574 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5575 
   5576 	wmq->wmq_id = qidx;
   5577 	wmq->wmq_intr_idx = intr_idx;
   5578 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5579 	    wm_handle_queue, wmq);
   5580 	if (wmq->wmq_si != NULL)
   5581 		return 0;
   5582 
   5583 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5584 	    wmq->wmq_id);
   5585 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5586 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5587 	return ENOMEM;
   5588 }
   5589 
   5590 /*
   5591  * Both single interrupt MSI and INTx can use this function.
   5592  */
   5593 static int
   5594 wm_setup_legacy(struct wm_softc *sc)
   5595 {
   5596 	pci_chipset_tag_t pc = sc->sc_pc;
   5597 	const char *intrstr = NULL;
   5598 	char intrbuf[PCI_INTRSTR_LEN];
   5599 	int error;
   5600 
   5601 	error = wm_alloc_txrx_queues(sc);
   5602 	if (error) {
   5603 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5604 		    error);
   5605 		return ENOMEM;
   5606 	}
   5607 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5608 	    sizeof(intrbuf));
   5609 #ifdef WM_MPSAFE
   5610 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5611 #endif
   5612 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5613 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5614 	if (sc->sc_ihs[0] == NULL) {
   5615 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5616 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5617 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5618 		return ENOMEM;
   5619 	}
   5620 
   5621 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5622 	sc->sc_nintrs = 1;
   5623 
   5624 	return wm_softint_establish_queue(sc, 0, 0);
   5625 }
   5626 
   5627 static int
   5628 wm_setup_msix(struct wm_softc *sc)
   5629 {
   5630 	void *vih;
   5631 	kcpuset_t *affinity;
   5632 	int qidx, error, intr_idx, txrx_established;
   5633 	pci_chipset_tag_t pc = sc->sc_pc;
   5634 	const char *intrstr = NULL;
   5635 	char intrbuf[PCI_INTRSTR_LEN];
   5636 	char intr_xname[INTRDEVNAMEBUF];
   5637 
   5638 	if (sc->sc_nqueues < ncpu) {
   5639 		/*
   5640 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5641 		 * interrupts starts from CPU#1.
   5642 		 */
   5643 		sc->sc_affinity_offset = 1;
   5644 	} else {
   5645 		/*
    5646 		 * In this case, this device uses all CPUs, so we make the
    5647 		 * affinity cpu_index match the MSI-X vector number for
         		 * readability.
   5648 		 */
   5649 		sc->sc_affinity_offset = 0;
   5650 	}
   5651 
   5652 	error = wm_alloc_txrx_queues(sc);
   5653 	if (error) {
   5654 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5655 		    error);
   5656 		return ENOMEM;
   5657 	}
   5658 
   5659 	kcpuset_create(&affinity, false);
   5660 	intr_idx = 0;
   5661 
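         	/*
         	 * Vector layout: one Tx/Rx vector per queue (0 .. nqueues - 1),
         	 * followed by a single link vector.
         	 */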
   5662 	/*
   5663 	 * TX and RX
   5664 	 */
   5665 	txrx_established = 0;
   5666 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5667 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5668 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5669 
   5670 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5671 		    sizeof(intrbuf));
   5672 #ifdef WM_MPSAFE
   5673 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5674 		    PCI_INTR_MPSAFE, true);
   5675 #endif
   5676 		memset(intr_xname, 0, sizeof(intr_xname));
   5677 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5678 		    device_xname(sc->sc_dev), qidx);
   5679 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5680 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5681 		if (vih == NULL) {
   5682 			aprint_error_dev(sc->sc_dev,
   5683 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5684 			    intrstr ? " at " : "",
   5685 			    intrstr ? intrstr : "");
   5686 
   5687 			goto fail;
   5688 		}
   5689 		kcpuset_zero(affinity);
   5690 		/* Round-robin affinity */
   5691 		kcpuset_set(affinity, affinity_to);
   5692 		error = interrupt_distribute(vih, affinity, NULL);
   5693 		if (error == 0) {
   5694 			aprint_normal_dev(sc->sc_dev,
   5695 			    "for TX and RX interrupting at %s affinity to %u\n",
   5696 			    intrstr, affinity_to);
   5697 		} else {
   5698 			aprint_normal_dev(sc->sc_dev,
   5699 			    "for TX and RX interrupting at %s\n", intrstr);
   5700 		}
   5701 		sc->sc_ihs[intr_idx] = vih;
   5702 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5703 			goto fail;
   5704 		txrx_established++;
   5705 		intr_idx++;
   5706 	}
   5707 
   5708 	/* LINK */
   5709 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5710 	    sizeof(intrbuf));
   5711 #ifdef WM_MPSAFE
   5712 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5713 #endif
   5714 	memset(intr_xname, 0, sizeof(intr_xname));
   5715 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5716 	    device_xname(sc->sc_dev));
   5717 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5718 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5719 	if (vih == NULL) {
   5720 		aprint_error_dev(sc->sc_dev,
   5721 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5722 		    intrstr ? " at " : "",
   5723 		    intrstr ? intrstr : "");
   5724 
   5725 		goto fail;
   5726 	}
   5727 	/* Keep default affinity to LINK interrupt */
   5728 	aprint_normal_dev(sc->sc_dev,
   5729 	    "for LINK interrupting at %s\n", intrstr);
   5730 	sc->sc_ihs[intr_idx] = vih;
   5731 	sc->sc_link_intr_idx = intr_idx;
   5732 
   5733 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5734 	kcpuset_destroy(affinity);
   5735 	return 0;
   5736 
   5737  fail:
   5738 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5739 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    5740 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5741 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5742 	}
   5743 
   5744 	kcpuset_destroy(affinity);
   5745 	return ENOMEM;
   5746 }
   5747 
   5748 static void
   5749 wm_unset_stopping_flags(struct wm_softc *sc)
   5750 {
   5751 	int i;
   5752 
   5753 	KASSERT(WM_CORE_LOCKED(sc));
   5754 
   5755 	/* Must unset stopping flags in ascending order. */
   5756 	for (i = 0; i < sc->sc_nqueues; i++) {
   5757 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5758 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5759 
   5760 		mutex_enter(txq->txq_lock);
   5761 		txq->txq_stopping = false;
   5762 		mutex_exit(txq->txq_lock);
   5763 
   5764 		mutex_enter(rxq->rxq_lock);
   5765 		rxq->rxq_stopping = false;
   5766 		mutex_exit(rxq->rxq_lock);
   5767 	}
   5768 
   5769 	sc->sc_core_stopping = false;
   5770 }
   5771 
   5772 static void
   5773 wm_set_stopping_flags(struct wm_softc *sc)
   5774 {
   5775 	int i;
   5776 
   5777 	KASSERT(WM_CORE_LOCKED(sc));
   5778 
   5779 	sc->sc_core_stopping = true;
   5780 
   5781 	/* Must set stopping flags in ascending order. */
   5782 	for (i = 0; i < sc->sc_nqueues; i++) {
   5783 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5784 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5785 
   5786 		mutex_enter(rxq->rxq_lock);
   5787 		rxq->rxq_stopping = true;
   5788 		mutex_exit(rxq->rxq_lock);
   5789 
   5790 		mutex_enter(txq->txq_lock);
   5791 		txq->txq_stopping = true;
   5792 		mutex_exit(txq->txq_lock);
   5793 	}
   5794 }
   5795 
   5796 /*
   5797  * Write interrupt interval value to ITR or EITR
   5798  */
   5799 static void
   5800 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5801 {
   5802 
   5803 	if (!wmq->wmq_set_itr)
   5804 		return;
   5805 
   5806 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5807 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5808 
   5809 		/*
   5810 		 * 82575 doesn't have CNT_INGR field.
    5811 		 * So, overwrite the counter field in software.
   5812 		 */
   5813 		if (sc->sc_type == WM_T_82575)
   5814 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5815 		else
   5816 			eitr |= EITR_CNT_INGR;
   5817 
   5818 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5819 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5820 		/*
    5821 		 * 82574 has both ITR and EITR. Set EITR when we use
    5822 		 * the multiqueue function with MSI-X.
   5823 		 */
   5824 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5825 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5826 	} else {
   5827 		KASSERT(wmq->wmq_id == 0);
   5828 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5829 	}
   5830 
   5831 	wmq->wmq_set_itr = false;
   5832 }
   5833 
   5834 /*
   5835  * TODO
    5836  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5837  * but it does not fit wm(4), so AIM stays disabled until we find an
    5838  * appropriate ITR calculation.
   5839  */
   5840 /*
    5841  * Calculate the interrupt interval value to be written to the register
    5842  * in wm_itrs_writereg(). This function does not write the ITR/EITR
          * register itself.
   5843  */
   5844 static void
   5845 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5846 {
   5847 #ifdef NOTYET
   5848 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5849 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5850 	uint32_t avg_size = 0;
   5851 	uint32_t new_itr;
   5852 
   5853 	if (rxq->rxq_packets)
   5854 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5855 	if (txq->txq_packets)
   5856 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5857 
   5858 	if (avg_size == 0) {
   5859 		new_itr = 450; /* restore default value */
   5860 		goto out;
   5861 	}
   5862 
   5863 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5864 	avg_size += 24;
   5865 
   5866 	/* Don't starve jumbo frames */
   5867 	avg_size = uimin(avg_size, 3000);
   5868 
   5869 	/* Give a little boost to mid-size frames */
   5870 	if ((avg_size > 300) && (avg_size < 1200))
   5871 		new_itr = avg_size / 3;
   5872 	else
   5873 		new_itr = avg_size / 2;
   5874 
   5875 out:
   5876 	/*
    5877 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5878 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5879 	 */
   5880 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5881 		new_itr *= 4;
   5882 
   5883 	if (new_itr != wmq->wmq_itr) {
   5884 		wmq->wmq_itr = new_itr;
   5885 		wmq->wmq_set_itr = true;
   5886 	} else
   5887 		wmq->wmq_set_itr = false;
   5888 
   5889 	rxq->rxq_packets = 0;
   5890 	rxq->rxq_bytes = 0;
   5891 	txq->txq_packets = 0;
   5892 	txq->txq_bytes = 0;
   5893 #endif
   5894 }
   5895 
   5896 static void
   5897 wm_init_sysctls(struct wm_softc *sc)
   5898 {
   5899 	struct sysctllog **log;
   5900 	const struct sysctlnode *rnode, *qnode, *cnode;
   5901 	int i, rv;
   5902 	const char *dvname;
   5903 
   5904 	log = &sc->sc_sysctllog;
   5905 	dvname = device_xname(sc->sc_dev);
   5906 
   5907 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5908 	    0, CTLTYPE_NODE, dvname,
   5909 	    SYSCTL_DESCR("wm information and settings"),
   5910 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5911 	if (rv != 0)
   5912 		goto err;
   5913 
   5914 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
    5915 	    CTLTYPE_BOOL, "txrx_workqueue",
         	    SYSCTL_DESCR("Use workqueue for packet processing"),
   5916 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5917 	if (rv != 0)
   5918 		goto teardown;
   5919 
   5920 	for (i = 0; i < sc->sc_nqueues; i++) {
   5921 		struct wm_queue *wmq = &sc->sc_queue[i];
   5922 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5923 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5924 
   5925 		snprintf(sc->sc_queue[i].sysctlname,
   5926 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   5927 
   5928 		if (sysctl_createv(log, 0, &rnode, &qnode,
   5929 		    0, CTLTYPE_NODE,
   5930 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   5931 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5932 			break;
   5933 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5934 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5935 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   5936 		    NULL, 0, &txq->txq_free,
   5937 		    0, CTL_CREATE, CTL_EOL) != 0)
   5938 			break;
   5939 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5940 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5941 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   5942 		    NULL, 0, &txq->txq_next,
   5943 		    0, CTL_CREATE, CTL_EOL) != 0)
   5944 			break;
   5945 
   5946 		if (sysctl_createv(log, 0, &qnode, &cnode,
   5947 		    CTLFLAG_READONLY, CTLTYPE_INT,
   5948 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   5949 		    NULL, 0, &rxq->rxq_ptr,
   5950 		    0, CTL_CREATE, CTL_EOL) != 0)
   5951 			break;
   5952 	}
   5953 
   5954 #ifdef WM_DEBUG
   5955 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5956 	    CTLTYPE_INT, "debug_flags",
   5957 	    SYSCTL_DESCR(
   5958 		    "Debug flags:\n"	\
   5959 		    "\t0x01 LINK\n"	\
   5960 		    "\t0x02 TX\n"	\
   5961 		    "\t0x04 RX\n"	\
   5962 		    "\t0x08 GMII\n"	\
   5963 		    "\t0x10 MANAGE\n"	\
   5964 		    "\t0x20 NVM\n"	\
   5965 		    "\t0x40 INIT\n"	\
   5966 		    "\t0x80 LOCK"),
   5967 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   5968 	if (rv != 0)
   5969 		goto teardown;
   5970 #endif
   5971 
   5972 	return;
   5973 
   5974 teardown:
   5975 	sysctl_teardown(log);
   5976 err:
   5977 	sc->sc_sysctllog = NULL;
   5978 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5979 	    __func__, rv);
   5980 }
   5981 
   5982 /*
   5983  * wm_init:		[ifnet interface function]
   5984  *
   5985  *	Initialize the interface.
   5986  */
   5987 static int
   5988 wm_init(struct ifnet *ifp)
   5989 {
   5990 	struct wm_softc *sc = ifp->if_softc;
   5991 	int ret;
   5992 
   5993 	WM_CORE_LOCK(sc);
   5994 	ret = wm_init_locked(ifp);
   5995 	WM_CORE_UNLOCK(sc);
   5996 
   5997 	return ret;
   5998 }
   5999 
   6000 static int
   6001 wm_init_locked(struct ifnet *ifp)
   6002 {
   6003 	struct wm_softc *sc = ifp->if_softc;
   6004 	struct ethercom *ec = &sc->sc_ethercom;
   6005 	int i, j, trynum, error = 0;
   6006 	uint32_t reg, sfp_mask = 0;
   6007 
   6008 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6009 		device_xname(sc->sc_dev), __func__));
   6010 	KASSERT(WM_CORE_LOCKED(sc));
   6011 
   6012 	/*
    6013 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6014 	 * There is a small but measurable benefit to avoiding the adjustment
   6015 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6016 	 * on such platforms.  One possibility is that the DMA itself is
   6017 	 * slightly more efficient if the front of the entire packet (instead
   6018 	 * of the front of the headers) is aligned.
   6019 	 *
   6020 	 * Note we must always set align_tweak to 0 if we are using
   6021 	 * jumbo frames.
   6022 	 */
   6023 #ifdef __NO_STRICT_ALIGNMENT
   6024 	sc->sc_align_tweak = 0;
   6025 #else
   6026 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6027 		sc->sc_align_tweak = 0;
   6028 	else
   6029 		sc->sc_align_tweak = 2;
   6030 #endif /* __NO_STRICT_ALIGNMENT */
   6031 
   6032 	/* Cancel any pending I/O. */
   6033 	wm_stop_locked(ifp, false, false);
   6034 
   6035 	/* Update statistics before reset */
   6036 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6037 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6038 
   6039 	/* PCH_SPT hardware workaround */
   6040 	if (sc->sc_type == WM_T_PCH_SPT)
   6041 		wm_flush_desc_rings(sc);
   6042 
   6043 	/* Reset the chip to a known state. */
   6044 	wm_reset(sc);
   6045 
   6046 	/*
    6047 	 * AMT-based hardware can now take control from firmware.
   6048 	 * Do this after reset.
   6049 	 */
   6050 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6051 		wm_get_hw_control(sc);
   6052 
   6053 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6054 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6055 		wm_legacy_irq_quirk_spt(sc);
   6056 
   6057 	/* Init hardware bits */
   6058 	wm_initialize_hardware_bits(sc);
   6059 
   6060 	/* Reset the PHY. */
   6061 	if (sc->sc_flags & WM_F_HAS_MII)
   6062 		wm_gmii_reset(sc);
   6063 
   6064 	if (sc->sc_type >= WM_T_ICH8) {
   6065 		reg = CSR_READ(sc, WMREG_GCR);
   6066 		/*
   6067 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6068 		 * default after reset.
   6069 		 */
   6070 		if (sc->sc_type == WM_T_ICH8)
   6071 			reg |= GCR_NO_SNOOP_ALL;
   6072 		else
   6073 			reg &= ~GCR_NO_SNOOP_ALL;
   6074 		CSR_WRITE(sc, WMREG_GCR, reg);
   6075 	}
   6076 
   6077 	if ((sc->sc_type >= WM_T_ICH8)
   6078 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6079 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6080 
   6081 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6082 		reg |= CTRL_EXT_RO_DIS;
   6083 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6084 	}
   6085 
   6086 	/* Calculate (E)ITR value */
   6087 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6088 		/*
   6089 		 * For NEWQUEUE's EITR (except for 82575).
    6090 		 * 82575's EITR should be set to the same throttling value
    6091 		 * as other old controllers' ITR because the interrupts/sec
    6092 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    6093 		 *
    6094 		 * 82574's EITR should be set to the same value as the ITR.
    6095 		 *
    6096 		 * For N interrupts/sec, set this value to:
    6097 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   6098 		 */
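         		/*
         		 * By the rule above, a value of 450 corresponds to about
         		 * 1,000,000 / 450 = 2,222 interrupts/sec.
         		 */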
   6099 		sc->sc_itr_init = 450;
   6100 	} else if (sc->sc_type >= WM_T_82543) {
   6101 		/*
    6102 		 * Set up the interrupt throttling register (units of 256ns).
   6103 		 * Note that a footnote in Intel's documentation says this
   6104 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6105 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6106 		 * that that is also true for the 1024ns units of the other
   6107 		 * interrupt-related timer registers -- so, really, we ought
   6108 		 * to divide this value by 4 when the link speed is low.
   6109 		 *
   6110 		 * XXX implement this division at link speed change!
   6111 		 */
   6112 
   6113 		/*
   6114 		 * For N interrupts/sec, set this value to:
   6115 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6116 		 * absolute and packet timer values to this value
   6117 		 * divided by 4 to get "simple timer" behavior.
   6118 		 */
   6119 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6120 	}
   6121 
   6122 	error = wm_init_txrx_queues(sc);
   6123 	if (error)
   6124 		goto out;
   6125 
   6126 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6127 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6128 	    (sc->sc_type >= WM_T_82575))
   6129 		wm_serdes_power_up_link_82575(sc);
   6130 
   6131 	/* Clear out the VLAN table -- we don't use it (yet). */
   6132 	CSR_WRITE(sc, WMREG_VET, 0);
   6133 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6134 		trynum = 10; /* Due to hw errata */
   6135 	else
   6136 		trynum = 1;
   6137 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6138 		for (j = 0; j < trynum; j++)
   6139 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6140 
   6141 	/*
   6142 	 * Set up flow-control parameters.
   6143 	 *
   6144 	 * XXX Values could probably stand some tuning.
   6145 	 */
   6146 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6147 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6148 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6149 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6150 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6151 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6152 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6153 	}
   6154 
   6155 	sc->sc_fcrtl = FCRTL_DFLT;
   6156 	if (sc->sc_type < WM_T_82543) {
   6157 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6158 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6159 	} else {
   6160 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6161 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6162 	}
   6163 
   6164 	if (sc->sc_type == WM_T_80003)
   6165 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6166 	else
   6167 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6168 
   6169 	/* Writes the control register. */
   6170 	wm_set_vlan(sc);
   6171 
   6172 	if (sc->sc_flags & WM_F_HAS_MII) {
   6173 		uint16_t kmreg;
   6174 
   6175 		switch (sc->sc_type) {
   6176 		case WM_T_80003:
   6177 		case WM_T_ICH8:
   6178 		case WM_T_ICH9:
   6179 		case WM_T_ICH10:
   6180 		case WM_T_PCH:
   6181 		case WM_T_PCH2:
   6182 		case WM_T_PCH_LPT:
   6183 		case WM_T_PCH_SPT:
   6184 		case WM_T_PCH_CNP:
   6185 			/*
   6186 			 * Set the mac to wait the maximum time between each
   6187 			 * iteration and increase the max iterations when
   6188 			 * polling the phy; this fixes erroneous timeouts at
   6189 			 * 10Mbps.
   6190 			 */
   6191 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6192 			    0xFFFF);
   6193 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6194 			    &kmreg);
   6195 			kmreg |= 0x3F;
   6196 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6197 			    kmreg);
   6198 			break;
   6199 		default:
   6200 			break;
   6201 		}
   6202 
   6203 		if (sc->sc_type == WM_T_80003) {
   6204 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6205 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6206 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6207 
    6208 			/* Bypass the RX and TX FIFOs */
   6209 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6210 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6211 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6212 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6213 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6214 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6215 		}
   6216 	}
   6217 #if 0
   6218 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6219 #endif
   6220 
   6221 	/* Set up checksum offload parameters. */
   6222 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6223 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6224 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6225 		reg |= RXCSUM_IPOFL;
   6226 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6227 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6228 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6229 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6230 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6231 
   6232 	/* Set registers about MSI-X */
   6233 	if (wm_is_using_msix(sc)) {
   6234 		uint32_t ivar, qintr_idx;
   6235 		struct wm_queue *wmq;
   6236 		unsigned int qid;
   6237 
   6238 		if (sc->sc_type == WM_T_82575) {
   6239 			/* Interrupt control */
   6240 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6241 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6242 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6243 
   6244 			/* TX and RX */
   6245 			for (i = 0; i < sc->sc_nqueues; i++) {
   6246 				wmq = &sc->sc_queue[i];
   6247 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6248 				    EITR_TX_QUEUE(wmq->wmq_id)
   6249 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6250 			}
   6251 			/* Link status */
   6252 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6253 			    EITR_OTHER);
   6254 		} else if (sc->sc_type == WM_T_82574) {
   6255 			/* Interrupt control */
   6256 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6257 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6258 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6259 
   6260 			/*
    6261 			 * Work around an issue with spurious interrupts
    6262 			 * in MSI-X mode.
    6263 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6264 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   6265 			 */
   6266 			reg = CSR_READ(sc, WMREG_RFCTL);
   6267 			reg |= WMREG_RFCTL_ACKDIS;
   6268 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6269 
   6270 			ivar = 0;
   6271 			/* TX and RX */
   6272 			for (i = 0; i < sc->sc_nqueues; i++) {
   6273 				wmq = &sc->sc_queue[i];
   6274 				qid = wmq->wmq_id;
   6275 				qintr_idx = wmq->wmq_intr_idx;
   6276 
   6277 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6278 				    IVAR_TX_MASK_Q_82574(qid));
   6279 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6280 				    IVAR_RX_MASK_Q_82574(qid));
   6281 			}
   6282 			/* Link status */
   6283 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6284 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6285 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6286 		} else {
   6287 			/* Interrupt control */
   6288 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6289 			    | GPIE_EIAME | GPIE_PBA);
   6290 
   6291 			switch (sc->sc_type) {
   6292 			case WM_T_82580:
   6293 			case WM_T_I350:
   6294 			case WM_T_I354:
   6295 			case WM_T_I210:
   6296 			case WM_T_I211:
   6297 				/* TX and RX */
   6298 				for (i = 0; i < sc->sc_nqueues; i++) {
   6299 					wmq = &sc->sc_queue[i];
   6300 					qid = wmq->wmq_id;
   6301 					qintr_idx = wmq->wmq_intr_idx;
   6302 
   6303 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6304 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6305 					ivar |= __SHIFTIN((qintr_idx
   6306 						| IVAR_VALID),
   6307 					    IVAR_TX_MASK_Q(qid));
   6308 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6309 					ivar |= __SHIFTIN((qintr_idx
   6310 						| IVAR_VALID),
   6311 					    IVAR_RX_MASK_Q(qid));
   6312 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6313 				}
   6314 				break;
   6315 			case WM_T_82576:
   6316 				/* TX and RX */
   6317 				for (i = 0; i < sc->sc_nqueues; i++) {
   6318 					wmq = &sc->sc_queue[i];
   6319 					qid = wmq->wmq_id;
   6320 					qintr_idx = wmq->wmq_intr_idx;
   6321 
   6322 					ivar = CSR_READ(sc,
   6323 					    WMREG_IVAR_Q_82576(qid));
   6324 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6325 					ivar |= __SHIFTIN((qintr_idx
   6326 						| IVAR_VALID),
   6327 					    IVAR_TX_MASK_Q_82576(qid));
   6328 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6329 					ivar |= __SHIFTIN((qintr_idx
   6330 						| IVAR_VALID),
   6331 					    IVAR_RX_MASK_Q_82576(qid));
   6332 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6333 					    ivar);
   6334 				}
   6335 				break;
   6336 			default:
   6337 				break;
   6338 			}
   6339 
   6340 			/* Link status */
   6341 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6342 			    IVAR_MISC_OTHER);
   6343 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6344 		}
   6345 
   6346 		if (wm_is_using_multiqueue(sc)) {
   6347 			wm_init_rss(sc);
   6348 
   6349 			/*
    6350 			 * NOTE: Receive Full-Packet Checksum Offload
    6351 			 * is mutually exclusive with Multiqueue. However,
    6352 			 * this is not the same as TCP/IP checksums, which
    6353 			 * still work.
    6354 			 */
   6355 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6356 			reg |= RXCSUM_PCSD;
   6357 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6358 		}
   6359 	}
   6360 
   6361 	/* Set up the interrupt registers. */
   6362 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6363 
   6364 	/* Enable SFP module insertion interrupt if it's required */
   6365 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6366 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6367 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6368 		sfp_mask = ICR_GPI(0);
   6369 	}
   6370 
   6371 	if (wm_is_using_msix(sc)) {
   6372 		uint32_t mask;
   6373 		struct wm_queue *wmq;
   6374 
   6375 		switch (sc->sc_type) {
   6376 		case WM_T_82574:
   6377 			mask = 0;
   6378 			for (i = 0; i < sc->sc_nqueues; i++) {
   6379 				wmq = &sc->sc_queue[i];
   6380 				mask |= ICR_TXQ(wmq->wmq_id);
   6381 				mask |= ICR_RXQ(wmq->wmq_id);
   6382 			}
   6383 			mask |= ICR_OTHER;
   6384 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6385 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6386 			break;
   6387 		default:
   6388 			if (sc->sc_type == WM_T_82575) {
   6389 				mask = 0;
   6390 				for (i = 0; i < sc->sc_nqueues; i++) {
   6391 					wmq = &sc->sc_queue[i];
   6392 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6393 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6394 				}
   6395 				mask |= EITR_OTHER;
   6396 			} else {
   6397 				mask = 0;
   6398 				for (i = 0; i < sc->sc_nqueues; i++) {
   6399 					wmq = &sc->sc_queue[i];
   6400 					mask |= 1 << wmq->wmq_intr_idx;
   6401 				}
   6402 				mask |= 1 << sc->sc_link_intr_idx;
   6403 			}
   6404 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6405 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6406 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6407 
   6408 			/* For other interrupts */
   6409 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6410 			break;
   6411 		}
   6412 	} else {
   6413 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6414 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6415 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6416 	}
   6417 
   6418 	/* Set up the inter-packet gap. */
   6419 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6420 
   6421 	if (sc->sc_type >= WM_T_82543) {
   6422 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6423 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6424 			wm_itrs_writereg(sc, wmq);
   6425 		}
   6426 		/*
    6427 		 * Link interrupts occur much less frequently than TX
    6428 		 * and RX interrupts, so we don't tune the
    6429 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    6430 		 * FreeBSD's if_igb does.
   6431 		 */
   6432 	}
   6433 
   6434 	/* Set the VLAN ethernetype. */
   6435 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6436 
   6437 	/*
   6438 	 * Set up the transmit control register; we start out with
    6439 	 * a collision distance suitable for FDX, but update it when
   6440 	 * we resolve the media type.
   6441 	 */
   6442 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6443 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6444 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6445 	if (sc->sc_type >= WM_T_82571)
   6446 		sc->sc_tctl |= TCTL_MULR;
   6447 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6448 
   6449 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6450 		/* Write TDT after TCTL.EN is set. See the document. */
   6451 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6452 	}
   6453 
   6454 	if (sc->sc_type == WM_T_80003) {
   6455 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6456 		reg &= ~TCTL_EXT_GCEX_MASK;
   6457 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6458 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6459 	}
   6460 
   6461 	/* Set the media. */
   6462 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6463 		goto out;
   6464 
   6465 	/* Configure for OS presence */
   6466 	wm_init_manageability(sc);
   6467 
   6468 	/*
   6469 	 * Set up the receive control register; we actually program the
   6470 	 * register when we set the receive filter. Use multicast address
   6471 	 * offset type 0.
   6472 	 *
   6473 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6474 	 * don't enable that feature.
   6475 	 */
   6476 	sc->sc_mchash_type = 0;
   6477 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6478 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6479 
    6480 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6481 	if (sc->sc_type == WM_T_82574)
   6482 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6483 
   6484 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6485 		sc->sc_rctl |= RCTL_SECRC;
   6486 
   6487 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6488 	    && (ifp->if_mtu > ETHERMTU)) {
   6489 		sc->sc_rctl |= RCTL_LPE;
   6490 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6491 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6492 	}
   6493 
   6494 	if (MCLBYTES == 2048)
   6495 		sc->sc_rctl |= RCTL_2k;
   6496 	else {
   6497 		if (sc->sc_type >= WM_T_82543) {
   6498 			switch (MCLBYTES) {
   6499 			case 4096:
   6500 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6501 				break;
   6502 			case 8192:
   6503 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6504 				break;
   6505 			case 16384:
   6506 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6507 				break;
   6508 			default:
   6509 				panic("wm_init: MCLBYTES %d unsupported",
   6510 				    MCLBYTES);
   6511 				break;
   6512 			}
   6513 		} else
   6514 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6515 	}
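	/*
	 * (A note based on the i8254x documentation: setting RCTL_BSEX makes
	 * the buffer size field select the extended 4K/8K/16K receive buffer
	 * sizes used above instead of the base 256..2048 byte encodings.)
	 */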
   6516 
   6517 	/* Enable ECC */
   6518 	switch (sc->sc_type) {
   6519 	case WM_T_82571:
   6520 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6521 		reg |= PBA_ECC_CORR_EN;
   6522 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6523 		break;
   6524 	case WM_T_PCH_LPT:
   6525 	case WM_T_PCH_SPT:
   6526 	case WM_T_PCH_CNP:
   6527 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6528 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6529 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6530 
   6531 		sc->sc_ctrl |= CTRL_MEHE;
   6532 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6533 		break;
   6534 	default:
   6535 		break;
   6536 	}
   6537 
   6538 	/*
   6539 	 * Set the receive filter.
   6540 	 *
   6541 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6542 	 * the setting of RCTL.EN in wm_set_filter()
   6543 	 */
   6544 	wm_set_filter(sc);
   6545 
    6546 	/* On 82575 and later, set RDT only if RX is enabled. */
   6547 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6548 		int qidx;
   6549 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6550 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6551 			for (i = 0; i < WM_NRXDESC; i++) {
   6552 				mutex_enter(rxq->rxq_lock);
   6553 				wm_init_rxdesc(rxq, i);
   6554 				mutex_exit(rxq->rxq_lock);
   6556 			}
   6557 		}
   6558 	}
   6559 
   6560 	wm_unset_stopping_flags(sc);
   6561 
   6562 	/* Start the one second link check clock. */
   6563 	callout_schedule(&sc->sc_tick_ch, hz);
   6564 
   6565 	/* ...all done! */
   6566 	ifp->if_flags |= IFF_RUNNING;
   6567 
   6568  out:
   6569 	/* Save last flags for the callback */
   6570 	sc->sc_if_flags = ifp->if_flags;
   6571 	sc->sc_ec_capenable = ec->ec_capenable;
   6572 	if (error)
   6573 		log(LOG_ERR, "%s: interface not running\n",
   6574 		    device_xname(sc->sc_dev));
   6575 	return error;
   6576 }
   6577 
   6578 /*
   6579  * wm_stop:		[ifnet interface function]
   6580  *
   6581  *	Stop transmission on the interface.
   6582  */
   6583 static void
   6584 wm_stop(struct ifnet *ifp, int disable)
   6585 {
   6586 	struct wm_softc *sc = ifp->if_softc;
   6587 
   6588 	ASSERT_SLEEPABLE();
   6589 
   6590 	WM_CORE_LOCK(sc);
   6591 	wm_stop_locked(ifp, disable ? true : false, true);
   6592 	WM_CORE_UNLOCK(sc);
   6593 
   6594 	/*
    6595 	 * After wm_set_stopping_flags(), it is guaranteed that
    6596 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    6597 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
    6598 	 * because it can sleep, so call workqueue_wait() here.
   6600 	 */
   6601 	for (int i = 0; i < sc->sc_nqueues; i++)
   6602 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6603 }
   6604 
   6605 static void
   6606 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6607 {
   6608 	struct wm_softc *sc = ifp->if_softc;
   6609 	struct wm_txsoft *txs;
   6610 	int i, qidx;
   6611 
   6612 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6613 		device_xname(sc->sc_dev), __func__));
   6614 	KASSERT(WM_CORE_LOCKED(sc));
   6615 
   6616 	wm_set_stopping_flags(sc);
   6617 
   6618 	if (sc->sc_flags & WM_F_HAS_MII) {
   6619 		/* Down the MII. */
   6620 		mii_down(&sc->sc_mii);
   6621 	} else {
   6622 #if 0
   6623 		/* Should we clear PHY's status properly? */
   6624 		wm_reset(sc);
   6625 #endif
   6626 	}
   6627 
   6628 	/* Stop the transmit and receive processes. */
   6629 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6630 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6631 	sc->sc_rctl &= ~RCTL_EN;
   6632 
   6633 	/*
   6634 	 * Clear the interrupt mask to ensure the device cannot assert its
   6635 	 * interrupt line.
   6636 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6637 	 * service any currently pending or shared interrupt.
   6638 	 */
   6639 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6640 	sc->sc_icr = 0;
   6641 	if (wm_is_using_msix(sc)) {
   6642 		if (sc->sc_type != WM_T_82574) {
   6643 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6644 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6645 		} else
   6646 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6647 	}
   6648 
   6649 	/*
   6650 	 * Stop callouts after interrupts are disabled; if we have
   6651 	 * to wait for them, we will be releasing the CORE_LOCK
   6652 	 * briefly, which will unblock interrupts on the current CPU.
   6653 	 */
   6654 
   6655 	/* Stop the one second clock. */
   6656 	if (wait)
   6657 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6658 	else
   6659 		callout_stop(&sc->sc_tick_ch);
   6660 
   6661 	/* Stop the 82547 Tx FIFO stall check timer. */
   6662 	if (sc->sc_type == WM_T_82547) {
   6663 		if (wait)
   6664 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6665 		else
   6666 			callout_stop(&sc->sc_txfifo_ch);
   6667 	}
   6668 
   6669 	/* Release any queued transmit buffers. */
   6670 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6671 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6672 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6673 		struct mbuf *m;
   6674 
   6675 		mutex_enter(txq->txq_lock);
   6676 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6677 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6678 			txs = &txq->txq_soft[i];
   6679 			if (txs->txs_mbuf != NULL) {
    6680 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6681 				m_freem(txs->txs_mbuf);
   6682 				txs->txs_mbuf = NULL;
   6683 			}
   6684 		}
   6685 		/* Drain txq_interq */
   6686 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6687 			m_freem(m);
   6688 		mutex_exit(txq->txq_lock);
   6689 	}
   6690 
   6691 	/* Mark the interface as down and cancel the watchdog timer. */
   6692 	ifp->if_flags &= ~IFF_RUNNING;
   6693 
   6694 	if (disable) {
   6695 		for (i = 0; i < sc->sc_nqueues; i++) {
   6696 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6697 			mutex_enter(rxq->rxq_lock);
   6698 			wm_rxdrain(rxq);
   6699 			mutex_exit(rxq->rxq_lock);
   6700 		}
   6701 	}
   6702 
   6703 #if 0 /* notyet */
   6704 	if (sc->sc_type >= WM_T_82544)
   6705 		CSR_WRITE(sc, WMREG_WUC, 0);
   6706 #endif
   6707 }
   6708 
   6709 static void
   6710 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6711 {
   6712 	struct mbuf *m;
   6713 	int i;
   6714 
   6715 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6716 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6717 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6718 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6719 		    m->m_data, m->m_len, m->m_flags);
   6720 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6721 	    i, i == 1 ? "" : "s");
   6722 }
   6723 
   6724 /*
   6725  * wm_82547_txfifo_stall:
   6726  *
   6727  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6728  *	reset the FIFO pointers, and restart packet transmission.
   6729  */
   6730 static void
   6731 wm_82547_txfifo_stall(void *arg)
   6732 {
   6733 	struct wm_softc *sc = arg;
   6734 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6735 
   6736 	mutex_enter(txq->txq_lock);
   6737 
   6738 	if (txq->txq_stopping)
   6739 		goto out;
   6740 
   6741 	if (txq->txq_fifo_stall) {
   6742 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6743 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6744 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6745 			/*
   6746 			 * Packets have drained.  Stop transmitter, reset
   6747 			 * FIFO pointers, restart transmitter, and kick
   6748 			 * the packet queue.
   6749 			 */
   6750 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6751 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6752 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6753 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6754 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6755 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6756 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6757 			CSR_WRITE_FLUSH(sc);
   6758 
   6759 			txq->txq_fifo_head = 0;
   6760 			txq->txq_fifo_stall = 0;
   6761 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6762 		} else {
   6763 			/*
   6764 			 * Still waiting for packets to drain; try again in
   6765 			 * another tick.
   6766 			 */
   6767 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6768 		}
   6769 	}
   6770 
   6771 out:
   6772 	mutex_exit(txq->txq_lock);
   6773 }
   6774 
   6775 /*
   6776  * wm_82547_txfifo_bugchk:
   6777  *
   6778  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6779  *	prevent enqueueing a packet that would wrap around the end
    6780  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6781  *
   6782  *	We do this by checking the amount of space before the end
   6783  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6784  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6785  *	the internal FIFO pointers to the beginning, and restart
   6786  *	transmission on the interface.
   6787  */
   6788 #define	WM_FIFO_HDR		0x10
   6789 #define	WM_82547_PAD_LEN	0x3e0
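/*
 * Worked example with illustrative numbers: if txq_fifo_size is 0x2000
 * (8KB) and txq_fifo_head is 0x1c00, then space is 0x400.  A 1500-byte
 * packet rounds up to len = 0x5f0, and since 0x5f0 < WM_82547_PAD_LEN +
 * 0x400 (= 0x7e0) it can be sent; a packet with len >= 0x7e0 would
 * instead stall the FIFO until it drains.
 */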
   6790 static int
   6791 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6792 {
   6793 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6794 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6795 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6796 
   6797 	/* Just return if already stalled. */
   6798 	if (txq->txq_fifo_stall)
   6799 		return 1;
   6800 
   6801 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6802 		/* Stall only occurs in half-duplex mode. */
   6803 		goto send_packet;
   6804 	}
   6805 
   6806 	if (len >= WM_82547_PAD_LEN + space) {
   6807 		txq->txq_fifo_stall = 1;
   6808 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6809 		return 1;
   6810 	}
   6811 
   6812  send_packet:
   6813 	txq->txq_fifo_head += len;
   6814 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6815 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6816 
   6817 	return 0;
   6818 }
   6819 
   6820 static int
   6821 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6822 {
   6823 	int error;
   6824 
   6825 	/*
   6826 	 * Allocate the control data structures, and create and load the
   6827 	 * DMA map for it.
   6828 	 *
   6829 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6830 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6831 	 * both sets within the same 4G segment.
   6832 	 */
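	/*
	 * The 4G constraint is what the 0x100000000ULL boundary argument to
	 * bus_dmamem_alloc() below enforces: the allocated segment may not
	 * cross a 4GB boundary.
	 */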
   6833 	if (sc->sc_type < WM_T_82544)
   6834 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6835 	else
   6836 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6837 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6838 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6839 	else
   6840 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6841 
   6842 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6843 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6844 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6845 		aprint_error_dev(sc->sc_dev,
   6846 		    "unable to allocate TX control data, error = %d\n",
   6847 		    error);
   6848 		goto fail_0;
   6849 	}
   6850 
   6851 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6852 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6853 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6854 		aprint_error_dev(sc->sc_dev,
   6855 		    "unable to map TX control data, error = %d\n", error);
   6856 		goto fail_1;
   6857 	}
   6858 
   6859 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6860 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6861 		aprint_error_dev(sc->sc_dev,
   6862 		    "unable to create TX control data DMA map, error = %d\n",
   6863 		    error);
   6864 		goto fail_2;
   6865 	}
   6866 
   6867 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6868 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6869 		aprint_error_dev(sc->sc_dev,
   6870 		    "unable to load TX control data DMA map, error = %d\n",
   6871 		    error);
   6872 		goto fail_3;
   6873 	}
   6874 
   6875 	return 0;
   6876 
   6877  fail_3:
   6878 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6879  fail_2:
   6880 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6881 	    WM_TXDESCS_SIZE(txq));
   6882  fail_1:
   6883 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6884  fail_0:
   6885 	return error;
   6886 }
   6887 
   6888 static void
   6889 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6890 {
   6891 
   6892 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6893 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6894 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6895 	    WM_TXDESCS_SIZE(txq));
   6896 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6897 }
   6898 
   6899 static int
   6900 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6901 {
   6902 	int error;
   6903 	size_t rxq_descs_size;
   6904 
   6905 	/*
   6906 	 * Allocate the control data structures, and create and load the
   6907 	 * DMA map for it.
   6908 	 *
   6909 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6910 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6911 	 * both sets within the same 4G segment.
   6912 	 */
   6913 	rxq->rxq_ndesc = WM_NRXDESC;
   6914 	if (sc->sc_type == WM_T_82574)
   6915 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6916 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6917 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6918 	else
   6919 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6920 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6921 
   6922 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6923 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6924 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6925 		aprint_error_dev(sc->sc_dev,
   6926 		    "unable to allocate RX control data, error = %d\n",
   6927 		    error);
   6928 		goto fail_0;
   6929 	}
   6930 
   6931 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6932 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6933 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6934 		aprint_error_dev(sc->sc_dev,
   6935 		    "unable to map RX control data, error = %d\n", error);
   6936 		goto fail_1;
   6937 	}
   6938 
   6939 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6940 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6941 		aprint_error_dev(sc->sc_dev,
   6942 		    "unable to create RX control data DMA map, error = %d\n",
   6943 		    error);
   6944 		goto fail_2;
   6945 	}
   6946 
   6947 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6948 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6949 		aprint_error_dev(sc->sc_dev,
   6950 		    "unable to load RX control data DMA map, error = %d\n",
   6951 		    error);
   6952 		goto fail_3;
   6953 	}
   6954 
   6955 	return 0;
   6956 
   6957  fail_3:
   6958 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6959  fail_2:
   6960 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6961 	    rxq_descs_size);
   6962  fail_1:
   6963 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6964  fail_0:
   6965 	return error;
   6966 }
   6967 
   6968 static void
   6969 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6970 {
   6971 
   6972 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6973 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6974 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6975 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6976 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6977 }
   6978 
   6979 
   6980 static int
   6981 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6982 {
   6983 	int i, error;
   6984 
   6985 	/* Create the transmit buffer DMA maps. */
   6986 	WM_TXQUEUELEN(txq) =
   6987 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6988 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6989 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6990 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6991 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6992 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6993 			aprint_error_dev(sc->sc_dev,
   6994 			    "unable to create Tx DMA map %d, error = %d\n",
   6995 			    i, error);
   6996 			goto fail;
   6997 		}
   6998 	}
   6999 
   7000 	return 0;
   7001 
   7002  fail:
   7003 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7004 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7005 			bus_dmamap_destroy(sc->sc_dmat,
   7006 			    txq->txq_soft[i].txs_dmamap);
   7007 	}
   7008 	return error;
   7009 }
   7010 
   7011 static void
   7012 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7013 {
   7014 	int i;
   7015 
   7016 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7017 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7018 			bus_dmamap_destroy(sc->sc_dmat,
   7019 			    txq->txq_soft[i].txs_dmamap);
   7020 	}
   7021 }
   7022 
   7023 static int
   7024 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7025 {
   7026 	int i, error;
   7027 
   7028 	/* Create the receive buffer DMA maps. */
   7029 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7030 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7031 			    MCLBYTES, 0, 0,
   7032 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7033 			aprint_error_dev(sc->sc_dev,
   7034 			    "unable to create Rx DMA map %d error = %d\n",
   7035 			    i, error);
   7036 			goto fail;
   7037 		}
   7038 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7039 	}
   7040 
   7041 	return 0;
   7042 
   7043  fail:
   7044 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7045 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7046 			bus_dmamap_destroy(sc->sc_dmat,
   7047 			    rxq->rxq_soft[i].rxs_dmamap);
   7048 	}
   7049 	return error;
   7050 }
   7051 
   7052 static void
   7053 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7054 {
   7055 	int i;
   7056 
   7057 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7058 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7059 			bus_dmamap_destroy(sc->sc_dmat,
   7060 			    rxq->rxq_soft[i].rxs_dmamap);
   7061 	}
   7062 }
   7063 
   7064 /*
    7065  * wm_alloc_txrx_queues:
   7066  *	Allocate {tx,rx}descs and {tx,rx} buffers
   7067  */
   7068 static int
   7069 wm_alloc_txrx_queues(struct wm_softc *sc)
   7070 {
   7071 	int i, error, tx_done, rx_done;
   7072 
   7073 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7074 	    KM_SLEEP);
   7075 	if (sc->sc_queue == NULL) {
    7076 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7077 		error = ENOMEM;
   7078 		goto fail_0;
   7079 	}
   7080 
   7081 	/* For transmission */
   7082 	error = 0;
   7083 	tx_done = 0;
   7084 	for (i = 0; i < sc->sc_nqueues; i++) {
   7085 #ifdef WM_EVENT_COUNTERS
   7086 		int j;
   7087 		const char *xname;
   7088 #endif
   7089 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7090 		txq->txq_sc = sc;
   7091 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7092 
   7093 		error = wm_alloc_tx_descs(sc, txq);
   7094 		if (error)
   7095 			break;
   7096 		error = wm_alloc_tx_buffer(sc, txq);
   7097 		if (error) {
   7098 			wm_free_tx_descs(sc, txq);
   7099 			break;
   7100 		}
   7101 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7102 		if (txq->txq_interq == NULL) {
   7103 			wm_free_tx_descs(sc, txq);
   7104 			wm_free_tx_buffer(sc, txq);
   7105 			error = ENOMEM;
   7106 			break;
   7107 		}
   7108 
   7109 #ifdef WM_EVENT_COUNTERS
   7110 		xname = device_xname(sc->sc_dev);
   7111 
   7112 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7113 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7114 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7115 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7116 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7117 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7118 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7119 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7120 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7121 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7122 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7123 
   7124 		for (j = 0; j < WM_NTXSEGS; j++) {
   7125 			snprintf(txq->txq_txseg_evcnt_names[j],
   7126 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   7127 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   7128 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7129 		}
   7130 
   7131 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7132 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7133 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7134 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7135 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7136 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7137 #endif /* WM_EVENT_COUNTERS */
   7138 
   7139 		tx_done++;
   7140 	}
   7141 	if (error)
   7142 		goto fail_1;
   7143 
   7144 	/* For receive */
   7145 	error = 0;
   7146 	rx_done = 0;
   7147 	for (i = 0; i < sc->sc_nqueues; i++) {
   7148 #ifdef WM_EVENT_COUNTERS
   7149 		const char *xname;
   7150 #endif
   7151 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7152 		rxq->rxq_sc = sc;
   7153 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7154 
   7155 		error = wm_alloc_rx_descs(sc, rxq);
   7156 		if (error)
   7157 			break;
   7158 
   7159 		error = wm_alloc_rx_buffer(sc, rxq);
   7160 		if (error) {
   7161 			wm_free_rx_descs(sc, rxq);
   7162 			break;
   7163 		}
   7164 
   7165 #ifdef WM_EVENT_COUNTERS
   7166 		xname = device_xname(sc->sc_dev);
   7167 
   7168 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7169 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7170 
   7171 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7172 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7173 #endif /* WM_EVENT_COUNTERS */
   7174 
   7175 		rx_done++;
   7176 	}
   7177 	if (error)
   7178 		goto fail_2;
   7179 
   7180 	return 0;
   7181 
   7182  fail_2:
   7183 	for (i = 0; i < rx_done; i++) {
   7184 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7185 		wm_free_rx_buffer(sc, rxq);
   7186 		wm_free_rx_descs(sc, rxq);
   7187 		if (rxq->rxq_lock)
   7188 			mutex_obj_free(rxq->rxq_lock);
   7189 	}
   7190  fail_1:
   7191 	for (i = 0; i < tx_done; i++) {
   7192 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7193 		pcq_destroy(txq->txq_interq);
   7194 		wm_free_tx_buffer(sc, txq);
   7195 		wm_free_tx_descs(sc, txq);
   7196 		if (txq->txq_lock)
   7197 			mutex_obj_free(txq->txq_lock);
   7198 	}
   7199 
   7200 	kmem_free(sc->sc_queue,
   7201 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7202  fail_0:
   7203 	return error;
   7204 }
   7205 
   7206 /*
    7207  * wm_free_txrx_queues:
   7208  *	Free {tx,rx}descs and {tx,rx} buffers
   7209  */
   7210 static void
   7211 wm_free_txrx_queues(struct wm_softc *sc)
   7212 {
   7213 	int i;
   7214 
   7215 	for (i = 0; i < sc->sc_nqueues; i++) {
   7216 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7217 
   7218 #ifdef WM_EVENT_COUNTERS
   7219 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7220 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7221 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7222 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7223 #endif /* WM_EVENT_COUNTERS */
   7224 
   7225 		wm_free_rx_buffer(sc, rxq);
   7226 		wm_free_rx_descs(sc, rxq);
   7227 		if (rxq->rxq_lock)
   7228 			mutex_obj_free(rxq->rxq_lock);
   7229 	}
   7230 
   7231 	for (i = 0; i < sc->sc_nqueues; i++) {
   7232 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7233 		struct mbuf *m;
   7234 #ifdef WM_EVENT_COUNTERS
   7235 		int j;
   7236 
   7237 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7238 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7239 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7240 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7241 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7242 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7243 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7244 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7245 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7246 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7247 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7248 
   7249 		for (j = 0; j < WM_NTXSEGS; j++)
   7250 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7251 
   7252 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7253 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7254 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7255 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7256 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7257 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7258 #endif /* WM_EVENT_COUNTERS */
   7259 
   7260 		/* Drain txq_interq */
   7261 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7262 			m_freem(m);
   7263 		pcq_destroy(txq->txq_interq);
   7264 
   7265 		wm_free_tx_buffer(sc, txq);
   7266 		wm_free_tx_descs(sc, txq);
   7267 		if (txq->txq_lock)
   7268 			mutex_obj_free(txq->txq_lock);
   7269 	}
   7270 
   7271 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7272 }
   7273 
   7274 static void
   7275 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7276 {
   7277 
   7278 	KASSERT(mutex_owned(txq->txq_lock));
   7279 
   7280 	/* Initialize the transmit descriptor ring. */
   7281 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7282 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7283 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7284 	txq->txq_free = WM_NTXDESC(txq);
   7285 	txq->txq_next = 0;
   7286 }
   7287 
   7288 static void
   7289 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7290     struct wm_txqueue *txq)
   7291 {
   7292 
   7293 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7294 		device_xname(sc->sc_dev), __func__));
   7295 	KASSERT(mutex_owned(txq->txq_lock));
   7296 
   7297 	if (sc->sc_type < WM_T_82543) {
   7298 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7299 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7300 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7301 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7302 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7303 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7304 	} else {
   7305 		int qid = wmq->wmq_id;
   7306 
   7307 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7308 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7309 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7310 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7311 
   7312 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7313 			/*
   7314 			 * Don't write TDT before TCTL.EN is set.
   7315 			 * See the document.
   7316 			 */
   7317 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7318 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7319 			    | TXDCTL_WTHRESH(0));
   7320 		else {
   7321 			/* XXX should update with AIM? */
   7322 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7323 			if (sc->sc_type >= WM_T_82540) {
   7324 				/* Should be the same */
   7325 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7326 			}
   7327 
   7328 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7329 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7330 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7331 		}
   7332 	}
   7333 }
   7334 
   7335 static void
   7336 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7337 {
   7338 	int i;
   7339 
   7340 	KASSERT(mutex_owned(txq->txq_lock));
   7341 
   7342 	/* Initialize the transmit job descriptors. */
   7343 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7344 		txq->txq_soft[i].txs_mbuf = NULL;
   7345 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7346 	txq->txq_snext = 0;
   7347 	txq->txq_sdirty = 0;
   7348 }
   7349 
   7350 static void
   7351 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7352     struct wm_txqueue *txq)
   7353 {
   7354 
   7355 	KASSERT(mutex_owned(txq->txq_lock));
   7356 
   7357 	/*
   7358 	 * Set up some register offsets that are different between
   7359 	 * the i82542 and the i82543 and later chips.
   7360 	 */
   7361 	if (sc->sc_type < WM_T_82543)
   7362 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7363 	else
   7364 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7365 
   7366 	wm_init_tx_descs(sc, txq);
   7367 	wm_init_tx_regs(sc, wmq, txq);
   7368 	wm_init_tx_buffer(sc, txq);
   7369 
   7370 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7371 	txq->txq_sending = false;
   7372 }
   7373 
   7374 static void
   7375 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7376     struct wm_rxqueue *rxq)
   7377 {
   7378 
   7379 	KASSERT(mutex_owned(rxq->rxq_lock));
   7380 
   7381 	/*
   7382 	 * Initialize the receive descriptor and receive job
   7383 	 * descriptor rings.
   7384 	 */
   7385 	if (sc->sc_type < WM_T_82543) {
   7386 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7387 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7388 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7389 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7390 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7391 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7392 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7393 
   7394 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7395 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7396 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7397 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7398 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7399 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7400 	} else {
   7401 		int qid = wmq->wmq_id;
   7402 
   7403 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7404 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7405 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7406 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7407 
   7408 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7409 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7410 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7411 
   7412 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   7413 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7414 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
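			/*
			 * For example, assuming SRRCTL_BSIZEPKT_SHIFT is 10
			 * (BSIZEPKT in 1KB units), MCLBYTES = 2048 passes
			 * the alignment check above and programs a packet
			 * buffer size of 2.
			 */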
   7415 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7416 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7417 			    | RXDCTL_WTHRESH(1));
   7418 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7419 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7420 		} else {
   7421 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7422 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7423 			/* XXX should update with AIM? */
   7424 			CSR_WRITE(sc, WMREG_RDTR,
   7425 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7426 			/* MUST be same */
   7427 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7428 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7429 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7430 		}
   7431 	}
   7432 }
   7433 
   7434 static int
   7435 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7436 {
   7437 	struct wm_rxsoft *rxs;
   7438 	int error, i;
   7439 
   7440 	KASSERT(mutex_owned(rxq->rxq_lock));
   7441 
   7442 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7443 		rxs = &rxq->rxq_soft[i];
   7444 		if (rxs->rxs_mbuf == NULL) {
   7445 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7446 				log(LOG_ERR, "%s: unable to allocate or map "
   7447 				    "rx buffer %d, error = %d\n",
   7448 				    device_xname(sc->sc_dev), i, error);
   7449 				/*
   7450 				 * XXX Should attempt to run with fewer receive
   7451 				 * XXX buffers instead of just failing.
   7452 				 */
   7453 				wm_rxdrain(rxq);
   7454 				return ENOMEM;
   7455 			}
   7456 		} else {
   7457 			/*
   7458 			 * For 82575 and 82576, the RX descriptors must be
   7459 			 * initialized after the setting of RCTL.EN in
   7460 			 * wm_set_filter()
   7461 			 */
   7462 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7463 				wm_init_rxdesc(rxq, i);
   7464 		}
   7465 	}
   7466 	rxq->rxq_ptr = 0;
   7467 	rxq->rxq_discard = 0;
   7468 	WM_RXCHAIN_RESET(rxq);
   7469 
   7470 	return 0;
   7471 }
   7472 
   7473 static int
   7474 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7475     struct wm_rxqueue *rxq)
   7476 {
   7477 
   7478 	KASSERT(mutex_owned(rxq->rxq_lock));
   7479 
   7480 	/*
   7481 	 * Set up some register offsets that are different between
   7482 	 * the i82542 and the i82543 and later chips.
   7483 	 */
   7484 	if (sc->sc_type < WM_T_82543)
   7485 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7486 	else
   7487 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7488 
   7489 	wm_init_rx_regs(sc, wmq, rxq);
   7490 	return wm_init_rx_buffer(sc, rxq);
   7491 }
   7492 
   7493 /*
    7494  * wm_init_txrx_queues:
   7495  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7496  */
   7497 static int
   7498 wm_init_txrx_queues(struct wm_softc *sc)
   7499 {
   7500 	int i, error = 0;
   7501 
   7502 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7503 		device_xname(sc->sc_dev), __func__));
   7504 
   7505 	for (i = 0; i < sc->sc_nqueues; i++) {
   7506 		struct wm_queue *wmq = &sc->sc_queue[i];
   7507 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7508 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7509 
   7510 		/*
   7511 		 * TODO
    7512 		 * Currently, use a constant value instead of AIM.
    7513 		 * Furthermore, the interrupt interval of multiqueue, which
    7514 		 * uses polling mode, is less than the default value.
   7515 		 * More tuning and AIM are required.
   7516 		 */
   7517 		if (wm_is_using_multiqueue(sc))
   7518 			wmq->wmq_itr = 50;
   7519 		else
   7520 			wmq->wmq_itr = sc->sc_itr_init;
   7521 		wmq->wmq_set_itr = true;
   7522 
   7523 		mutex_enter(txq->txq_lock);
   7524 		wm_init_tx_queue(sc, wmq, txq);
   7525 		mutex_exit(txq->txq_lock);
   7526 
   7527 		mutex_enter(rxq->rxq_lock);
   7528 		error = wm_init_rx_queue(sc, wmq, rxq);
   7529 		mutex_exit(rxq->rxq_lock);
   7530 		if (error)
   7531 			break;
   7532 	}
   7533 
   7534 	return error;
   7535 }
   7536 
   7537 /*
   7538  * wm_tx_offload:
   7539  *
   7540  *	Set up TCP/IP checksumming parameters for the
   7541  *	specified packet.
   7542  */
   7543 static void
   7544 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7545     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7546 {
   7547 	struct mbuf *m0 = txs->txs_mbuf;
   7548 	struct livengood_tcpip_ctxdesc *t;
   7549 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7550 	uint32_t ipcse;
   7551 	struct ether_header *eh;
   7552 	int offset, iphl;
   7553 	uint8_t fields;
   7554 
   7555 	/*
   7556 	 * XXX It would be nice if the mbuf pkthdr had offset
   7557 	 * fields for the protocol headers.
   7558 	 */
   7559 
   7560 	eh = mtod(m0, struct ether_header *);
   7561 	switch (htons(eh->ether_type)) {
   7562 	case ETHERTYPE_IP:
   7563 	case ETHERTYPE_IPV6:
   7564 		offset = ETHER_HDR_LEN;
   7565 		break;
   7566 
   7567 	case ETHERTYPE_VLAN:
   7568 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7569 		break;
   7570 
   7571 	default:
   7572 		/* Don't support this protocol or encapsulation. */
    7573 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
    7574 		txq->txq_last_hw_ipcs = 0;
    7575 		txq->txq_last_hw_tucs = 0;
   7576 		*fieldsp = 0;
   7577 		*cmdp = 0;
   7578 		return;
   7579 	}
   7580 
   7581 	if ((m0->m_pkthdr.csum_flags &
   7582 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7583 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7584 	} else
   7585 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7586 
   7587 	ipcse = offset + iphl - 1;
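	/*
	 * For example, a plain IPv4 packet with a 20-byte header gives
	 * ipcse = ETHER_HDR_LEN (14) + 20 - 1 = 33, the offset of the last
	 * byte covered by the IP header checksum.
	 */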
   7588 
   7589 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7590 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7591 	seg = 0;
   7592 	fields = 0;
   7593 
   7594 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7595 		int hlen = offset + iphl;
   7596 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
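		/*
		 * For a plain IPv4/TCP frame, hlen starts at
		 * ETHER_HDR_LEN (14) + 20 = 34 here and grows to 54 once
		 * the TCP header length (th_off << 2 = 20) is added below.
		 */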
   7597 
   7598 		if (__predict_false(m0->m_len <
   7599 				    (hlen + sizeof(struct tcphdr)))) {
   7600 			/*
   7601 			 * TCP/IP headers are not in the first mbuf; we need
   7602 			 * to do this the slow and painful way. Let's just
   7603 			 * hope this doesn't happen very often.
   7604 			 */
   7605 			struct tcphdr th;
   7606 
   7607 			WM_Q_EVCNT_INCR(txq, tsopain);
   7608 
   7609 			m_copydata(m0, hlen, sizeof(th), &th);
   7610 			if (v4) {
   7611 				struct ip ip;
   7612 
   7613 				m_copydata(m0, offset, sizeof(ip), &ip);
   7614 				ip.ip_len = 0;
   7615 				m_copyback(m0,
   7616 				    offset + offsetof(struct ip, ip_len),
   7617 				    sizeof(ip.ip_len), &ip.ip_len);
   7618 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7619 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7620 			} else {
   7621 				struct ip6_hdr ip6;
   7622 
   7623 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7624 				ip6.ip6_plen = 0;
   7625 				m_copyback(m0,
   7626 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7627 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7628 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7629 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7630 			}
   7631 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7632 			    sizeof(th.th_sum), &th.th_sum);
   7633 
   7634 			hlen += th.th_off << 2;
   7635 		} else {
   7636 			/*
   7637 			 * TCP/IP headers are in the first mbuf; we can do
   7638 			 * this the easy way.
   7639 			 */
   7640 			struct tcphdr *th;
   7641 
   7642 			if (v4) {
   7643 				struct ip *ip =
   7644 				    (void *)(mtod(m0, char *) + offset);
   7645 				th = (void *)(mtod(m0, char *) + hlen);
   7646 
   7647 				ip->ip_len = 0;
   7648 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7649 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7650 			} else {
   7651 				struct ip6_hdr *ip6 =
   7652 				    (void *)(mtod(m0, char *) + offset);
   7653 				th = (void *)(mtod(m0, char *) + hlen);
   7654 
   7655 				ip6->ip6_plen = 0;
   7656 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7657 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7658 			}
   7659 			hlen += th->th_off << 2;
   7660 		}
   7661 
   7662 		if (v4) {
   7663 			WM_Q_EVCNT_INCR(txq, tso);
   7664 			cmdlen |= WTX_TCPIP_CMD_IP;
   7665 		} else {
   7666 			WM_Q_EVCNT_INCR(txq, tso6);
   7667 			ipcse = 0;
   7668 		}
   7669 		cmd |= WTX_TCPIP_CMD_TSE;
   7670 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7671 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7672 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7673 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7674 	}
   7675 
   7676 	/*
   7677 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7678 	 * offload feature, if we load the context descriptor, we
   7679 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7680 	 */
   7681 
   7682 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7683 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7684 	    WTX_TCPIP_IPCSE(ipcse);
   7685 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7686 		WM_Q_EVCNT_INCR(txq, ipsum);
   7687 		fields |= WTX_IXSM;
   7688 	}
   7689 
   7690 	offset += iphl;
   7691 
   7692 	if (m0->m_pkthdr.csum_flags &
   7693 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7694 		WM_Q_EVCNT_INCR(txq, tusum);
   7695 		fields |= WTX_TXSM;
   7696 		tucs = WTX_TCPIP_TUCSS(offset) |
   7697 		    WTX_TCPIP_TUCSO(offset +
   7698 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7699 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7700 	} else if ((m0->m_pkthdr.csum_flags &
   7701 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7702 		WM_Q_EVCNT_INCR(txq, tusum6);
   7703 		fields |= WTX_TXSM;
   7704 		tucs = WTX_TCPIP_TUCSS(offset) |
   7705 		    WTX_TCPIP_TUCSO(offset +
   7706 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7707 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7708 	} else {
   7709 		/* Just initialize it to a valid TCP context. */
   7710 		tucs = WTX_TCPIP_TUCSS(offset) |
   7711 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7712 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7713 	}
   7714 
   7715 	*cmdp = cmd;
   7716 	*fieldsp = fields;
   7717 
   7718 	/*
    7719 	 * We don't have to write a context descriptor for every packet,
    7720 	 * except on the 82574. For the 82574, we must write a context
    7721 	 * descriptor for every packet when we use two descriptor queues.
    7722 	 *
    7723 	 * The 82574L can only remember the *last* context used,
    7724 	 * regardless of the queue it was used for.  We cannot reuse
   7725 	 * contexts on this hardware platform and must generate a new
   7726 	 * context every time.  82574L hardware spec, section 7.2.6,
   7727 	 * second note.
   7728 	 */
   7729 	if (sc->sc_nqueues < 2) {
   7730 		/*
    7731 		 *
    7732 		 * Setting up a new checksum offload context for every
    7733 		 * frame takes a lot of processing time for hardware.
    7734 		 * This also reduces performance a lot for small sized
    7735 		 * frames, so avoid it if the driver can use a previously
    7736 		 * configured checksum offload context.
    7737 		 * For TSO, in theory we can use the same TSO context only
    7738 		 * if the frame is the same type (IP/TCP) and has the same
    7739 		 * MSS. However, checking whether a frame has the same
    7740 		 * IP/TCP structure is a hard thing, so just ignore that
    7741 		 * and always re-establish a new TSO context.
    7742 		 */
   7743 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7744 		    == 0) {
   7745 			if (txq->txq_last_hw_cmd == cmd &&
   7746 			    txq->txq_last_hw_fields == fields &&
   7747 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7748 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7749 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7750 				return;
   7751 			}
   7752 		}
   7753 
    7754 		txq->txq_last_hw_cmd = cmd;
    7755 		txq->txq_last_hw_fields = fields;
    7756 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7757 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7758 	}
   7759 
   7760 	/* Fill in the context descriptor. */
   7761 	t = (struct livengood_tcpip_ctxdesc *)
   7762 	    &txq->txq_descs[txq->txq_next];
   7763 	t->tcpip_ipcs = htole32(ipcs);
   7764 	t->tcpip_tucs = htole32(tucs);
   7765 	t->tcpip_cmdlen = htole32(cmdlen);
   7766 	t->tcpip_seg = htole32(seg);
   7767 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7768 
   7769 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7770 	txs->txs_ndesc++;
   7771 }
   7772 
   7773 static inline int
   7774 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7775 {
   7776 	struct wm_softc *sc = ifp->if_softc;
   7777 	u_int cpuid = cpu_index(curcpu());
   7778 
   7779 	/*
    7780 	 * Currently, a simple distribution strategy.
    7781 	 * TODO:
    7782 	 * Distribute by flowid (RSS hash value).
   7783 	 */
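	/*
	 * Illustrative example: with ncpu = 8, sc_affinity_offset = 2 and
	 * sc_nqueues = 4, a thread running on CPU 5 is mapped to queue
	 * ((5 + 8 - 2) % 8) % 4 = 3.
	 */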
   7784 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7785 }
   7786 
   7787 static inline bool
   7788 wm_linkdown_discard(struct wm_txqueue *txq)
   7789 {
   7790 
   7791 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7792 		return true;
   7793 
   7794 	return false;
   7795 }
   7796 
   7797 /*
   7798  * wm_start:		[ifnet interface function]
   7799  *
   7800  *	Start packet transmission on the interface.
   7801  */
   7802 static void
   7803 wm_start(struct ifnet *ifp)
   7804 {
   7805 	struct wm_softc *sc = ifp->if_softc;
   7806 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7807 
   7808 #ifdef WM_MPSAFE
   7809 	KASSERT(if_is_mpsafe(ifp));
   7810 #endif
   7811 	/*
   7812 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7813 	 */
   7814 
   7815 	mutex_enter(txq->txq_lock);
   7816 	if (!txq->txq_stopping)
   7817 		wm_start_locked(ifp);
   7818 	mutex_exit(txq->txq_lock);
   7819 }
   7820 
   7821 static void
   7822 wm_start_locked(struct ifnet *ifp)
   7823 {
   7824 	struct wm_softc *sc = ifp->if_softc;
   7825 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7826 
   7827 	wm_send_common_locked(ifp, txq, false);
   7828 }
   7829 
   7830 static int
   7831 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7832 {
   7833 	int qid;
   7834 	struct wm_softc *sc = ifp->if_softc;
   7835 	struct wm_txqueue *txq;
   7836 
   7837 	qid = wm_select_txqueue(ifp, m);
   7838 	txq = &sc->sc_queue[qid].wmq_txq;
   7839 
   7840 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7841 		m_freem(m);
   7842 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7843 		return ENOBUFS;
   7844 	}
   7845 
   7846 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7847 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7848 	if (m->m_flags & M_MCAST)
   7849 		if_statinc_ref(nsr, if_omcasts);
   7850 	IF_STAT_PUTREF(ifp);
   7851 
   7852 	if (mutex_tryenter(txq->txq_lock)) {
   7853 		if (!txq->txq_stopping)
   7854 			wm_transmit_locked(ifp, txq);
   7855 		mutex_exit(txq->txq_lock);
   7856 	}
   7857 
   7858 	return 0;
   7859 }
   7860 
   7861 static void
   7862 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7863 {
   7864 
   7865 	wm_send_common_locked(ifp, txq, true);
   7866 }
   7867 
   7868 static void
   7869 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7870     bool is_transmit)
   7871 {
   7872 	struct wm_softc *sc = ifp->if_softc;
   7873 	struct mbuf *m0;
   7874 	struct wm_txsoft *txs;
   7875 	bus_dmamap_t dmamap;
   7876 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7877 	bus_addr_t curaddr;
   7878 	bus_size_t seglen, curlen;
   7879 	uint32_t cksumcmd;
   7880 	uint8_t cksumfields;
   7881 	bool remap = true;
   7882 
   7883 	KASSERT(mutex_owned(txq->txq_lock));
   7884 
   7885 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7886 		return;
   7887 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7888 		return;
   7889 
   7890 	if (__predict_false(wm_linkdown_discard(txq))) {
   7891 		do {
   7892 			if (is_transmit)
   7893 				m0 = pcq_get(txq->txq_interq);
   7894 			else
   7895 				IFQ_DEQUEUE(&ifp->if_snd, m0);
			/*
			 * Increment the successfully-sent packet counter,
			 * as in the case where the packet is discarded by
			 * a link-down PHY.
			 */
   7900 			if (m0 != NULL)
   7901 				if_statinc(ifp, if_opackets);
   7902 			m_freem(m0);
   7903 		} while (m0 != NULL);
   7904 		return;
   7905 	}
   7906 
   7907 	/* Remember the previous number of free descriptors. */
   7908 	ofree = txq->txq_free;
   7909 
   7910 	/*
   7911 	 * Loop through the send queue, setting up transmit descriptors
   7912 	 * until we drain the queue, or use up all available transmit
   7913 	 * descriptors.
   7914 	 */
   7915 	for (;;) {
   7916 		m0 = NULL;
   7917 
   7918 		/* Get a work queue entry. */
   7919 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7920 			wm_txeof(txq, UINT_MAX);
   7921 			if (txq->txq_sfree == 0) {
   7922 				DPRINTF(sc, WM_DEBUG_TX,
   7923 				    ("%s: TX: no free job descriptors\n",
   7924 					device_xname(sc->sc_dev)));
   7925 				WM_Q_EVCNT_INCR(txq, txsstall);
   7926 				break;
   7927 			}
   7928 		}
   7929 
   7930 		/* Grab a packet off the queue. */
   7931 		if (is_transmit)
   7932 			m0 = pcq_get(txq->txq_interq);
   7933 		else
   7934 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7935 		if (m0 == NULL)
   7936 			break;
   7937 
   7938 		DPRINTF(sc, WM_DEBUG_TX,
   7939 		    ("%s: TX: have packet to transmit: %p\n",
   7940 			device_xname(sc->sc_dev), m0));
   7941 
   7942 		txs = &txq->txq_soft[txq->txq_snext];
   7943 		dmamap = txs->txs_dmamap;
   7944 
   7945 		use_tso = (m0->m_pkthdr.csum_flags &
   7946 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7947 
   7948 		/*
   7949 		 * So says the Linux driver:
   7950 		 * The controller does a simple calculation to make sure
   7951 		 * there is enough room in the FIFO before initiating the
   7952 		 * DMA for each buffer. The calc is:
   7953 		 *	4 = ceil(buffer len / MSS)
   7954 		 * To make sure we don't overrun the FIFO, adjust the max
   7955 		 * buffer len if the MSS drops.
   7956 		 */
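		/*
		 * Illustrative arithmetic only (typical MSS assumed):
		 * with segsz = 1460, segsz << 2 = 5840, so each DMA
		 * segment is capped at 5840 bytes when that is below
		 * WTX_MAX_LEN; otherwise WTX_MAX_LEN itself is the cap.
		 */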
   7957 		dmamap->dm_maxsegsz =
   7958 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7959 		    ? m0->m_pkthdr.segsz << 2
   7960 		    : WTX_MAX_LEN;
   7961 
   7962 		/*
   7963 		 * Load the DMA map.  If this fails, the packet either
   7964 		 * didn't fit in the allotted number of segments, or we
   7965 		 * were short on resources.  For the too-many-segments
   7966 		 * case, we simply report an error and drop the packet,
   7967 		 * since we can't sanely copy a jumbo packet to a single
   7968 		 * buffer.
   7969 		 */
   7970 retry:
   7971 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7972 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7973 		if (__predict_false(error)) {
   7974 			if (error == EFBIG) {
   7975 				if (remap == true) {
   7976 					struct mbuf *m;
   7977 
   7978 					remap = false;
   7979 					m = m_defrag(m0, M_NOWAIT);
   7980 					if (m != NULL) {
   7981 						WM_Q_EVCNT_INCR(txq, defrag);
   7982 						m0 = m;
   7983 						goto retry;
   7984 					}
   7985 				}
   7986 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7987 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7988 				    "DMA segments, dropping...\n",
   7989 				    device_xname(sc->sc_dev));
   7990 				wm_dump_mbuf_chain(sc, m0);
   7991 				m_freem(m0);
   7992 				continue;
   7993 			}
   7994 			/* Short on resources, just stop for now. */
   7995 			DPRINTF(sc, WM_DEBUG_TX,
   7996 			    ("%s: TX: dmamap load failed: %d\n",
   7997 				device_xname(sc->sc_dev), error));
   7998 			break;
   7999 		}
   8000 
   8001 		segs_needed = dmamap->dm_nsegs;
   8002 		if (use_tso) {
   8003 			/* For sentinel descriptor; see below. */
   8004 			segs_needed++;
   8005 		}
   8006 
   8007 		/*
   8008 		 * Ensure we have enough descriptors free to describe
   8009 		 * the packet. Note, we always reserve one descriptor
   8010 		 * at the end of the ring due to the semantics of the
   8011 		 * TDT register, plus one more in the event we need
   8012 		 * to load offload context.
   8013 		 */
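		/*
		 * Illustrative example (hypothetical numbers): with
		 * txq_free = 10, a packet needing segs_needed = 9 stalls
		 * below, since 9 > 10 - 2: one descriptor is reserved for
		 * the TDT semantics and one for a possible offload
		 * context.
		 */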
   8014 		if (segs_needed > txq->txq_free - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
   8022 			DPRINTF(sc, WM_DEBUG_TX,
   8023 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8024 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8025 				segs_needed, txq->txq_free - 1));
   8026 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8027 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8028 			WM_Q_EVCNT_INCR(txq, txdstall);
   8029 			break;
   8030 		}
   8031 
   8032 		/*
   8033 		 * Check for 82547 Tx FIFO bug. We need to do this
   8034 		 * once we know we can transmit the packet, since we
   8035 		 * do some internal FIFO space accounting here.
   8036 		 */
   8037 		if (sc->sc_type == WM_T_82547 &&
   8038 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8039 			DPRINTF(sc, WM_DEBUG_TX,
   8040 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8041 				device_xname(sc->sc_dev)));
   8042 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8043 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8044 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8045 			break;
   8046 		}
   8047 
   8048 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8049 
   8050 		DPRINTF(sc, WM_DEBUG_TX,
   8051 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8052 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8053 
   8054 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8055 
		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may
		 * be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
   8065 		txs->txs_mbuf = m0;
   8066 		txs->txs_firstdesc = txq->txq_next;
   8067 		txs->txs_ndesc = segs_needed;
   8068 
   8069 		/* Set up offload parameters for this packet. */
   8070 		if (m0->m_pkthdr.csum_flags &
   8071 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8072 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8073 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8074 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8075 		} else {
			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8078 			cksumcmd = 0;
   8079 			cksumfields = 0;
   8080 		}
   8081 
   8082 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8083 
   8084 		/* Sync the DMA map. */
   8085 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8086 		    BUS_DMASYNC_PREWRITE);
   8087 
   8088 		/* Initialize the transmit descriptor. */
   8089 		for (nexttx = txq->txq_next, seg = 0;
   8090 		     seg < dmamap->dm_nsegs; seg++) {
   8091 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8092 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8093 			     seglen != 0;
   8094 			     curaddr += curlen, seglen -= curlen,
   8095 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8096 				curlen = seglen;
   8097 
   8098 				/*
   8099 				 * So says the Linux driver:
   8100 				 * Work around for premature descriptor
   8101 				 * write-backs in TSO mode.  Append a
   8102 				 * 4-byte sentinel descriptor.
   8103 				 */
   8104 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8105 				    curlen > 8)
   8106 					curlen -= 4;
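				/*
				 * The 4 bytes trimmed here come back as a
				 * final 4-byte descriptor on the next loop
				 * iteration; that short descriptor is the
				 * sentinel.
				 */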
   8107 
   8108 				wm_set_dma_addr(
   8109 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8110 				txq->txq_descs[nexttx].wtx_cmdlen
   8111 				    = htole32(cksumcmd | curlen);
   8112 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8113 				    = 0;
   8114 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8115 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   8117 				lasttx = nexttx;
   8118 
   8119 				DPRINTF(sc, WM_DEBUG_TX,
   8120 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8121 					"len %#04zx\n",
   8122 					device_xname(sc->sc_dev), nexttx,
   8123 					(uint64_t)curaddr, curlen));
   8124 			}
   8125 		}
   8126 
   8127 		KASSERT(lasttx != -1);
   8128 
   8129 		/*
   8130 		 * Set up the command byte on the last descriptor of
   8131 		 * the packet. If we're in the interrupt delay window,
   8132 		 * delay the interrupt.
   8133 		 */
   8134 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8135 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8136 
   8137 		/*
   8138 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8139 		 * up the descriptor to encapsulate the packet for us.
   8140 		 *
   8141 		 * This is only valid on the last descriptor of the packet.
   8142 		 */
   8143 		if (vlan_has_tag(m0)) {
   8144 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8145 			    htole32(WTX_CMD_VLE);
   8146 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8147 			    = htole16(vlan_get_tag(m0));
   8148 		}
   8149 
   8150 		txs->txs_lastdesc = lasttx;
   8151 
   8152 		DPRINTF(sc, WM_DEBUG_TX,
   8153 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8154 			device_xname(sc->sc_dev),
   8155 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8156 
   8157 		/* Sync the descriptors we're using. */
   8158 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8159 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8160 
   8161 		/* Give the packet to the chip. */
   8162 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8163 
   8164 		DPRINTF(sc, WM_DEBUG_TX,
   8165 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8166 
   8167 		DPRINTF(sc, WM_DEBUG_TX,
   8168 		    ("%s: TX: finished transmitting packet, job %d\n",
   8169 			device_xname(sc->sc_dev), txq->txq_snext));
   8170 
   8171 		/* Advance the tx pointer. */
   8172 		txq->txq_free -= txs->txs_ndesc;
   8173 		txq->txq_next = nexttx;
   8174 
   8175 		txq->txq_sfree--;
   8176 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8177 
   8178 		/* Pass the packet to any BPF listeners. */
   8179 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8180 	}
   8181 
   8182 	if (m0 != NULL) {
   8183 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8184 		WM_Q_EVCNT_INCR(txq, descdrop);
   8185 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8186 			__func__));
   8187 		m_freem(m0);
   8188 	}
   8189 
   8190 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8191 		/* No more slots; notify upper layer. */
   8192 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8193 	}
   8194 
   8195 	if (txq->txq_free != ofree) {
   8196 		/* Set a watchdog timer in case the chip flakes out. */
   8197 		txq->txq_lastsent = time_uptime;
   8198 		txq->txq_sending = true;
   8199 	}
   8200 }
   8201 
   8202 /*
   8203  * wm_nq_tx_offload:
   8204  *
   8205  *	Set up TCP/IP checksumming parameters for the
   8206  *	specified packet, for NEWQUEUE devices
   8207  */
   8208 static void
   8209 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8210     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8211 {
   8212 	struct mbuf *m0 = txs->txs_mbuf;
   8213 	uint32_t vl_len, mssidx, cmdc;
   8214 	struct ether_header *eh;
   8215 	int offset, iphl;
   8216 
   8217 	/*
   8218 	 * XXX It would be nice if the mbuf pkthdr had offset
   8219 	 * fields for the protocol headers.
   8220 	 */
   8221 	*cmdlenp = 0;
   8222 	*fieldsp = 0;
   8223 
   8224 	eh = mtod(m0, struct ether_header *);
   8225 	switch (htons(eh->ether_type)) {
   8226 	case ETHERTYPE_IP:
   8227 	case ETHERTYPE_IPV6:
   8228 		offset = ETHER_HDR_LEN;
   8229 		break;
   8230 
   8231 	case ETHERTYPE_VLAN:
   8232 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8233 		break;
   8234 
   8235 	default:
   8236 		/* Don't support this protocol or encapsulation. */
   8237 		*do_csum = false;
   8238 		return;
   8239 	}
   8240 	*do_csum = true;
   8241 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8242 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8243 
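	/*
	 * vl_len packs the MAC header length, the IP header length and
	 * the VLAN tag into a single 32-bit word; the KASSERTs below
	 * check that each component fits within its field mask.
	 */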
   8244 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8245 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8246 
   8247 	if ((m0->m_pkthdr.csum_flags &
   8248 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8249 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8250 	} else {
   8251 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8252 	}
   8253 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8254 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8255 
   8256 	if (vlan_has_tag(m0)) {
   8257 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8258 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8259 		*cmdlenp |= NQTX_CMD_VLE;
   8260 	}
   8261 
   8262 	mssidx = 0;
   8263 
   8264 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8265 		int hlen = offset + iphl;
   8266 		int tcp_hlen;
   8267 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8268 
   8269 		if (__predict_false(m0->m_len <
   8270 				    (hlen + sizeof(struct tcphdr)))) {
   8271 			/*
   8272 			 * TCP/IP headers are not in the first mbuf; we need
   8273 			 * to do this the slow and painful way. Let's just
   8274 			 * hope this doesn't happen very often.
   8275 			 */
   8276 			struct tcphdr th;
   8277 
   8278 			WM_Q_EVCNT_INCR(txq, tsopain);
   8279 
   8280 			m_copydata(m0, hlen, sizeof(th), &th);
   8281 			if (v4) {
   8282 				struct ip ip;
   8283 
   8284 				m_copydata(m0, offset, sizeof(ip), &ip);
   8285 				ip.ip_len = 0;
   8286 				m_copyback(m0,
   8287 				    offset + offsetof(struct ip, ip_len),
   8288 				    sizeof(ip.ip_len), &ip.ip_len);
   8289 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8290 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8291 			} else {
   8292 				struct ip6_hdr ip6;
   8293 
   8294 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8295 				ip6.ip6_plen = 0;
   8296 				m_copyback(m0,
   8297 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8298 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8299 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8300 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8301 			}
   8302 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8303 			    sizeof(th.th_sum), &th.th_sum);
   8304 
   8305 			tcp_hlen = th.th_off << 2;
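			/* th_off is in 32-bit words; << 2 yields bytes. */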
   8306 		} else {
   8307 			/*
   8308 			 * TCP/IP headers are in the first mbuf; we can do
   8309 			 * this the easy way.
   8310 			 */
   8311 			struct tcphdr *th;
   8312 
   8313 			if (v4) {
   8314 				struct ip *ip =
   8315 				    (void *)(mtod(m0, char *) + offset);
   8316 				th = (void *)(mtod(m0, char *) + hlen);
   8317 
   8318 				ip->ip_len = 0;
   8319 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8320 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8321 			} else {
   8322 				struct ip6_hdr *ip6 =
   8323 				    (void *)(mtod(m0, char *) + offset);
   8324 				th = (void *)(mtod(m0, char *) + hlen);
   8325 
   8326 				ip6->ip6_plen = 0;
   8327 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8328 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8329 			}
   8330 			tcp_hlen = th->th_off << 2;
   8331 		}
   8332 		hlen += tcp_hlen;
   8333 		*cmdlenp |= NQTX_CMD_TSE;
   8334 
   8335 		if (v4) {
   8336 			WM_Q_EVCNT_INCR(txq, tso);
   8337 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8338 		} else {
   8339 			WM_Q_EVCNT_INCR(txq, tso6);
   8340 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8341 		}
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8344 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8345 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8346 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8347 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8348 	} else {
   8349 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8350 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8351 	}
   8352 
   8353 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8354 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8355 		cmdc |= NQTXC_CMD_IP4;
   8356 	}
   8357 
   8358 	if (m0->m_pkthdr.csum_flags &
   8359 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8360 		WM_Q_EVCNT_INCR(txq, tusum);
   8361 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8362 			cmdc |= NQTXC_CMD_TCP;
   8363 		else
   8364 			cmdc |= NQTXC_CMD_UDP;
   8365 
   8366 		cmdc |= NQTXC_CMD_IP4;
   8367 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8368 	}
   8369 	if (m0->m_pkthdr.csum_flags &
   8370 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8371 		WM_Q_EVCNT_INCR(txq, tusum6);
   8372 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8373 			cmdc |= NQTXC_CMD_TCP;
   8374 		else
   8375 			cmdc |= NQTXC_CMD_UDP;
   8376 
   8377 		cmdc |= NQTXC_CMD_IP6;
   8378 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8379 	}
   8380 
	/*
	 * We don't have to write a context descriptor for every packet to
	 * the NEWQUEUE controllers, that is, the 82575, 82576, 82580,
	 * I350, I354, I210 and I211.  It is enough to write one per Tx
	 * queue for these controllers.
	 * Writing a context descriptor for every packet adds overhead,
	 * but it does not cause problems.
	 */
   8389 	/* Fill in the context descriptor. */
   8390 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8391 	    htole32(vl_len);
   8392 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8393 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8394 	    htole32(cmdc);
   8395 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8396 	    htole32(mssidx);
   8397 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8398 	DPRINTF(sc, WM_DEBUG_TX,
   8399 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8400 		txq->txq_next, 0, vl_len));
   8401 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8402 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8403 	txs->txs_ndesc++;
   8404 }
   8405 
   8406 /*
   8407  * wm_nq_start:		[ifnet interface function]
   8408  *
   8409  *	Start packet transmission on the interface for NEWQUEUE devices
   8410  */
   8411 static void
   8412 wm_nq_start(struct ifnet *ifp)
   8413 {
   8414 	struct wm_softc *sc = ifp->if_softc;
   8415 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8416 
   8417 #ifdef WM_MPSAFE
   8418 	KASSERT(if_is_mpsafe(ifp));
   8419 #endif
   8420 	/*
   8421 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8422 	 */
   8423 
   8424 	mutex_enter(txq->txq_lock);
   8425 	if (!txq->txq_stopping)
   8426 		wm_nq_start_locked(ifp);
   8427 	mutex_exit(txq->txq_lock);
   8428 }
   8429 
   8430 static void
   8431 wm_nq_start_locked(struct ifnet *ifp)
   8432 {
   8433 	struct wm_softc *sc = ifp->if_softc;
   8434 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8435 
   8436 	wm_nq_send_common_locked(ifp, txq, false);
   8437 }
   8438 
   8439 static int
   8440 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8441 {
   8442 	int qid;
   8443 	struct wm_softc *sc = ifp->if_softc;
   8444 	struct wm_txqueue *txq;
   8445 
   8446 	qid = wm_select_txqueue(ifp, m);
   8447 	txq = &sc->sc_queue[qid].wmq_txq;
   8448 
   8449 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8450 		m_freem(m);
   8451 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8452 		return ENOBUFS;
   8453 	}
   8454 
   8455 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8456 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8457 	if (m->m_flags & M_MCAST)
   8458 		if_statinc_ref(nsr, if_omcasts);
   8459 	IF_STAT_PUTREF(ifp);
   8460 
	/*
	 * There are two situations in which this mutex_tryenter() can
	 * fail at run time:
	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get
	 * stuck.
	 * In case (2), the last packet enqueued to txq->txq_interq is
	 * likewise dequeued by wm_deferred_start_locked(), so it does
	 * not get stuck either.
	 */
   8472 	if (mutex_tryenter(txq->txq_lock)) {
   8473 		if (!txq->txq_stopping)
   8474 			wm_nq_transmit_locked(ifp, txq);
   8475 		mutex_exit(txq->txq_lock);
   8476 	}
   8477 
   8478 	return 0;
   8479 }
   8480 
   8481 static void
   8482 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8483 {
   8484 
   8485 	wm_nq_send_common_locked(ifp, txq, true);
   8486 }
   8487 
   8488 static void
   8489 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8490     bool is_transmit)
   8491 {
   8492 	struct wm_softc *sc = ifp->if_softc;
   8493 	struct mbuf *m0;
   8494 	struct wm_txsoft *txs;
   8495 	bus_dmamap_t dmamap;
   8496 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8497 	bool do_csum, sent;
   8498 	bool remap = true;
   8499 
   8500 	KASSERT(mutex_owned(txq->txq_lock));
   8501 
   8502 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8503 		return;
   8504 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8505 		return;
   8506 
   8507 	if (__predict_false(wm_linkdown_discard(txq))) {
   8508 		do {
   8509 			if (is_transmit)
   8510 				m0 = pcq_get(txq->txq_interq);
   8511 			else
   8512 				IFQ_DEQUEUE(&ifp->if_snd, m0);
			/*
			 * Increment the successfully-sent packet counter,
			 * as in the case where the packet is discarded by
			 * a link-down PHY.
			 */
   8517 			if (m0 != NULL)
   8518 				if_statinc(ifp, if_opackets);
   8519 			m_freem(m0);
   8520 		} while (m0 != NULL);
   8521 		return;
   8522 	}
   8523 
   8524 	sent = false;
   8525 
   8526 	/*
   8527 	 * Loop through the send queue, setting up transmit descriptors
   8528 	 * until we drain the queue, or use up all available transmit
   8529 	 * descriptors.
   8530 	 */
   8531 	for (;;) {
   8532 		m0 = NULL;
   8533 
   8534 		/* Get a work queue entry. */
   8535 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8536 			wm_txeof(txq, UINT_MAX);
   8537 			if (txq->txq_sfree == 0) {
   8538 				DPRINTF(sc, WM_DEBUG_TX,
   8539 				    ("%s: TX: no free job descriptors\n",
   8540 					device_xname(sc->sc_dev)));
   8541 				WM_Q_EVCNT_INCR(txq, txsstall);
   8542 				break;
   8543 			}
   8544 		}
   8545 
   8546 		/* Grab a packet off the queue. */
   8547 		if (is_transmit)
   8548 			m0 = pcq_get(txq->txq_interq);
   8549 		else
   8550 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8551 		if (m0 == NULL)
   8552 			break;
   8553 
   8554 		DPRINTF(sc, WM_DEBUG_TX,
   8555 		    ("%s: TX: have packet to transmit: %p\n",
   8556 		    device_xname(sc->sc_dev), m0));
   8557 
   8558 		txs = &txq->txq_soft[txq->txq_snext];
   8559 		dmamap = txs->txs_dmamap;
   8560 
   8561 		/*
   8562 		 * Load the DMA map.  If this fails, the packet either
   8563 		 * didn't fit in the allotted number of segments, or we
   8564 		 * were short on resources.  For the too-many-segments
   8565 		 * case, we simply report an error and drop the packet,
   8566 		 * since we can't sanely copy a jumbo packet to a single
   8567 		 * buffer.
   8568 		 */
   8569 retry:
   8570 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8571 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8572 		if (__predict_false(error)) {
   8573 			if (error == EFBIG) {
   8574 				if (remap == true) {
   8575 					struct mbuf *m;
   8576 
   8577 					remap = false;
   8578 					m = m_defrag(m0, M_NOWAIT);
   8579 					if (m != NULL) {
   8580 						WM_Q_EVCNT_INCR(txq, defrag);
   8581 						m0 = m;
   8582 						goto retry;
   8583 					}
   8584 				}
   8585 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8586 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8587 				    "DMA segments, dropping...\n",
   8588 				    device_xname(sc->sc_dev));
   8589 				wm_dump_mbuf_chain(sc, m0);
   8590 				m_freem(m0);
   8591 				continue;
   8592 			}
   8593 			/* Short on resources, just stop for now. */
   8594 			DPRINTF(sc, WM_DEBUG_TX,
   8595 			    ("%s: TX: dmamap load failed: %d\n",
   8596 				device_xname(sc->sc_dev), error));
   8597 			break;
   8598 		}
   8599 
   8600 		segs_needed = dmamap->dm_nsegs;
   8601 
   8602 		/*
   8603 		 * Ensure we have enough descriptors free to describe
   8604 		 * the packet. Note, we always reserve one descriptor
   8605 		 * at the end of the ring due to the semantics of the
   8606 		 * TDT register, plus one more in the event we need
   8607 		 * to load offload context.
   8608 		 */
   8609 		if (segs_needed > txq->txq_free - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
   8617 			DPRINTF(sc, WM_DEBUG_TX,
   8618 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8619 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8620 				segs_needed, txq->txq_free - 1));
   8621 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8622 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8623 			WM_Q_EVCNT_INCR(txq, txdstall);
   8624 			break;
   8625 		}
   8626 
   8627 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8628 
   8629 		DPRINTF(sc, WM_DEBUG_TX,
   8630 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8631 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8632 
   8633 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8634 
		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may
		 * be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
   8644 		txs->txs_mbuf = m0;
   8645 		txs->txs_firstdesc = txq->txq_next;
   8646 		txs->txs_ndesc = segs_needed;
   8647 
   8648 		/* Set up offload parameters for this packet. */
   8649 		uint32_t cmdlen, fields, dcmdlen;
   8650 		if (m0->m_pkthdr.csum_flags &
   8651 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8652 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8653 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8654 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8655 			    &do_csum);
   8656 		} else {
   8657 			do_csum = false;
   8658 			cmdlen = 0;
   8659 			fields = 0;
   8660 		}
   8661 
   8662 		/* Sync the DMA map. */
   8663 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8664 		    BUS_DMASYNC_PREWRITE);
   8665 
   8666 		/* Initialize the first transmit descriptor. */
   8667 		nexttx = txq->txq_next;
   8668 		if (!do_csum) {
   8669 			/* Setup a legacy descriptor */
   8670 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8671 			    dmamap->dm_segs[0].ds_addr);
   8672 			txq->txq_descs[nexttx].wtx_cmdlen =
   8673 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8674 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8675 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8676 			if (vlan_has_tag(m0)) {
   8677 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8678 				    htole32(WTX_CMD_VLE);
   8679 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8680 				    htole16(vlan_get_tag(m0));
   8681 			} else
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   8683 
   8684 			dcmdlen = 0;
   8685 		} else {
   8686 			/* Setup an advanced data descriptor */
   8687 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8688 			    htole64(dmamap->dm_segs[0].ds_addr);
   8689 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8690 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8691 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8692 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8693 			    htole32(fields);
   8694 			DPRINTF(sc, WM_DEBUG_TX,
   8695 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8696 				device_xname(sc->sc_dev), nexttx,
   8697 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8698 			DPRINTF(sc, WM_DEBUG_TX,
   8699 			    ("\t 0x%08x%08x\n", fields,
   8700 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8701 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8702 		}
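		/*
		 * dcmdlen holds the descriptor-type bits applied to the
		 * remaining segments below: zero keeps them in the legacy
		 * format, while NQTX_DTYP_D | NQTX_CMD_DEXT marks them as
		 * NEWQUEUE data descriptors, matching the first
		 * descriptor set up above.
		 */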
   8703 
   8704 		lasttx = nexttx;
   8705 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the next descriptors.  The legacy and advanced
		 * formats are handled the same here.
		 */
   8710 		for (seg = 1; seg < dmamap->dm_nsegs;
   8711 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8712 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8713 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8714 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8715 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8716 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8717 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8718 			lasttx = nexttx;
   8719 
   8720 			DPRINTF(sc, WM_DEBUG_TX,
   8721 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8722 				device_xname(sc->sc_dev), nexttx,
   8723 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8724 				dmamap->dm_segs[seg].ds_len));
   8725 		}
   8726 
   8727 		KASSERT(lasttx != -1);
   8728 
   8729 		/*
   8730 		 * Set up the command byte on the last descriptor of
   8731 		 * the packet. If we're in the interrupt delay window,
   8732 		 * delay the interrupt.
   8733 		 */
   8734 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8735 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8736 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8737 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8738 
   8739 		txs->txs_lastdesc = lasttx;
   8740 
   8741 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8742 		    device_xname(sc->sc_dev),
   8743 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8744 
   8745 		/* Sync the descriptors we're using. */
   8746 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8747 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8748 
   8749 		/* Give the packet to the chip. */
   8750 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8751 		sent = true;
   8752 
   8753 		DPRINTF(sc, WM_DEBUG_TX,
   8754 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8755 
   8756 		DPRINTF(sc, WM_DEBUG_TX,
   8757 		    ("%s: TX: finished transmitting packet, job %d\n",
   8758 			device_xname(sc->sc_dev), txq->txq_snext));
   8759 
   8760 		/* Advance the tx pointer. */
   8761 		txq->txq_free -= txs->txs_ndesc;
   8762 		txq->txq_next = nexttx;
   8763 
   8764 		txq->txq_sfree--;
   8765 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8766 
   8767 		/* Pass the packet to any BPF listeners. */
   8768 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8769 	}
   8770 
   8771 	if (m0 != NULL) {
   8772 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8773 		WM_Q_EVCNT_INCR(txq, descdrop);
   8774 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8775 			__func__));
   8776 		m_freem(m0);
   8777 	}
   8778 
   8779 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8780 		/* No more slots; notify upper layer. */
   8781 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8782 	}
   8783 
   8784 	if (sent) {
   8785 		/* Set a watchdog timer in case the chip flakes out. */
   8786 		txq->txq_lastsent = time_uptime;
   8787 		txq->txq_sending = true;
   8788 	}
   8789 }
   8790 
   8791 static void
   8792 wm_deferred_start_locked(struct wm_txqueue *txq)
   8793 {
   8794 	struct wm_softc *sc = txq->txq_sc;
   8795 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8796 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8797 	int qid = wmq->wmq_id;
   8798 
   8799 	KASSERT(mutex_owned(txq->txq_lock));
   8800 
   8801 	if (txq->txq_stopping) {
   8802 		mutex_exit(txq->txq_lock);
   8803 		return;
   8804 	}
   8805 
   8806 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX Needed for ALTQ or a single-CPU system */
   8808 		if (qid == 0)
   8809 			wm_nq_start_locked(ifp);
   8810 		wm_nq_transmit_locked(ifp, txq);
   8811 	} else {
		/* XXX Needed for ALTQ or a single-CPU system */
   8813 		if (qid == 0)
   8814 			wm_start_locked(ifp);
   8815 		wm_transmit_locked(ifp, txq);
   8816 	}
   8817 }
   8818 
   8819 /* Interrupt */
   8820 
   8821 /*
   8822  * wm_txeof:
   8823  *
   8824  *	Helper; handle transmit interrupts.
   8825  */
   8826 static bool
   8827 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8828 {
   8829 	struct wm_softc *sc = txq->txq_sc;
   8830 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8831 	struct wm_txsoft *txs;
   8832 	int count = 0;
   8833 	int i;
   8834 	uint8_t status;
   8835 	bool more = false;
   8836 
   8837 	KASSERT(mutex_owned(txq->txq_lock));
   8838 
   8839 	if (txq->txq_stopping)
   8840 		return false;
   8841 
   8842 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8843 
   8844 	/*
   8845 	 * Go through the Tx list and free mbufs for those
   8846 	 * frames which have been transmitted.
   8847 	 */
   8848 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8849 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8850 		if (limit-- == 0) {
   8851 			more = true;
   8852 			DPRINTF(sc, WM_DEBUG_TX,
   8853 			    ("%s: TX: loop limited, job %d is not processed\n",
   8854 				device_xname(sc->sc_dev), i));
   8855 			break;
   8856 		}
   8857 
   8858 		txs = &txq->txq_soft[i];
   8859 
   8860 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8861 			device_xname(sc->sc_dev), i));
   8862 
   8863 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8864 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8865 
   8866 		status =
   8867 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8868 		if ((status & WTX_ST_DD) == 0) {
   8869 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8870 			    BUS_DMASYNC_PREREAD);
   8871 			break;
   8872 		}
   8873 
   8874 		count++;
   8875 		DPRINTF(sc, WM_DEBUG_TX,
   8876 		    ("%s: TX: job %d done: descs %d..%d\n",
   8877 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8878 		    txs->txs_lastdesc));
   8879 
   8880 		/*
   8881 		 * XXX We should probably be using the statistics
   8882 		 * XXX registers, but I don't know if they exist
   8883 		 * XXX on chips before the i82544.
   8884 		 */
   8885 
   8886 #ifdef WM_EVENT_COUNTERS
   8887 		if (status & WTX_ST_TU)
   8888 			WM_Q_EVCNT_INCR(txq, underrun);
   8889 #endif /* WM_EVENT_COUNTERS */
   8890 
		/*
		 * The documents for the 82574 and newer say that the
		 * status field has neither the EC (Excessive Collision)
		 * bit nor the LC (Late Collision) bit (they are reserved).
		 * Refer to the "PCIe GbE Controller Open Source Software
		 * Developer's Manual", the 82574 datasheet, and newer ones.
		 *
		 * XXX I saw the LC bit set on an I218 even though the
		 * media was full duplex, so the bit might be used for some
		 * other meaning ... (I have no documentation.)
		 */
   8901 
   8902 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8903 		    && ((sc->sc_type < WM_T_82574)
   8904 			|| (sc->sc_type == WM_T_80003))) {
   8905 			if_statinc(ifp, if_oerrors);
   8906 			if (status & WTX_ST_LC)
   8907 				log(LOG_WARNING, "%s: late collision\n",
   8908 				    device_xname(sc->sc_dev));
   8909 			else if (status & WTX_ST_EC) {
   8910 				if_statadd(ifp, if_collisions,
   8911 				    TX_COLLISION_THRESHOLD + 1);
   8912 				log(LOG_WARNING, "%s: excessive collisions\n",
   8913 				    device_xname(sc->sc_dev));
   8914 			}
   8915 		} else
   8916 			if_statinc(ifp, if_opackets);
   8917 
   8918 		txq->txq_packets++;
   8919 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8920 
   8921 		txq->txq_free += txs->txs_ndesc;
   8922 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8923 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8924 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8925 		m_freem(txs->txs_mbuf);
   8926 		txs->txs_mbuf = NULL;
   8927 	}
   8928 
   8929 	/* Update the dirty transmit buffer pointer. */
   8930 	txq->txq_sdirty = i;
   8931 	DPRINTF(sc, WM_DEBUG_TX,
   8932 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8933 
   8934 	if (count != 0)
   8935 		rnd_add_uint32(&sc->rnd_source, count);
   8936 
   8937 	/*
   8938 	 * If there are no more pending transmissions, cancel the watchdog
   8939 	 * timer.
   8940 	 */
   8941 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8942 		txq->txq_sending = false;
   8943 
   8944 	return more;
   8945 }
   8946 
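/*
 * The following inline accessors hide the three Rx descriptor layouts:
 * the 82574's extended format, the NEWQUEUE controllers' format, and
 * the legacy format used by everything else.
 */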
   8947 static inline uint32_t
   8948 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8949 {
   8950 	struct wm_softc *sc = rxq->rxq_sc;
   8951 
   8952 	if (sc->sc_type == WM_T_82574)
   8953 		return EXTRXC_STATUS(
   8954 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8955 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8956 		return NQRXC_STATUS(
   8957 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8958 	else
   8959 		return rxq->rxq_descs[idx].wrx_status;
   8960 }
   8961 
   8962 static inline uint32_t
   8963 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8964 {
   8965 	struct wm_softc *sc = rxq->rxq_sc;
   8966 
   8967 	if (sc->sc_type == WM_T_82574)
   8968 		return EXTRXC_ERROR(
   8969 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   8970 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8971 		return NQRXC_ERROR(
   8972 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   8973 	else
   8974 		return rxq->rxq_descs[idx].wrx_errors;
   8975 }
   8976 
   8977 static inline uint16_t
   8978 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8979 {
   8980 	struct wm_softc *sc = rxq->rxq_sc;
   8981 
   8982 	if (sc->sc_type == WM_T_82574)
   8983 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8984 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8985 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8986 	else
   8987 		return rxq->rxq_descs[idx].wrx_special;
   8988 }
   8989 
   8990 static inline int
   8991 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8992 {
   8993 	struct wm_softc *sc = rxq->rxq_sc;
   8994 
   8995 	if (sc->sc_type == WM_T_82574)
   8996 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8997 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8998 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8999 	else
   9000 		return rxq->rxq_descs[idx].wrx_len;
   9001 }
   9002 
   9003 #ifdef WM_DEBUG
   9004 static inline uint32_t
   9005 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9006 {
   9007 	struct wm_softc *sc = rxq->rxq_sc;
   9008 
   9009 	if (sc->sc_type == WM_T_82574)
   9010 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9011 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9012 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9013 	else
   9014 		return 0;
   9015 }
   9016 
   9017 static inline uint8_t
   9018 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9019 {
   9020 	struct wm_softc *sc = rxq->rxq_sc;
   9021 
   9022 	if (sc->sc_type == WM_T_82574)
   9023 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9024 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9025 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9026 	else
   9027 		return 0;
   9028 }
   9029 #endif /* WM_DEBUG */
   9030 
   9031 static inline bool
   9032 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9033     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9034 {
   9035 
   9036 	if (sc->sc_type == WM_T_82574)
   9037 		return (status & ext_bit) != 0;
   9038 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9039 		return (status & nq_bit) != 0;
   9040 	else
   9041 		return (status & legacy_bit) != 0;
   9042 }
   9043 
   9044 static inline bool
   9045 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9046     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9047 {
   9048 
   9049 	if (sc->sc_type == WM_T_82574)
   9050 		return (error & ext_bit) != 0;
   9051 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9052 		return (error & nq_bit) != 0;
   9053 	else
   9054 		return (error & legacy_bit) != 0;
   9055 }
   9056 
   9057 static inline bool
   9058 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9059 {
   9060 
   9061 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9062 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9063 		return true;
   9064 	else
   9065 		return false;
   9066 }
   9067 
   9068 static inline bool
   9069 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9070 {
   9071 	struct wm_softc *sc = rxq->rxq_sc;
   9072 
   9073 	/* XXX missing error bit for newqueue? */
   9074 	if (wm_rxdesc_is_set_error(sc, errors,
   9075 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9076 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9077 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9078 		NQRXC_ERROR_RXE)) {
   9079 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9080 		    EXTRXC_ERROR_SE, 0))
   9081 			log(LOG_WARNING, "%s: symbol error\n",
   9082 			    device_xname(sc->sc_dev));
   9083 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9084 		    EXTRXC_ERROR_SEQ, 0))
   9085 			log(LOG_WARNING, "%s: receive sequence error\n",
   9086 			    device_xname(sc->sc_dev));
   9087 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9088 		    EXTRXC_ERROR_CE, 0))
   9089 			log(LOG_WARNING, "%s: CRC error\n",
   9090 			    device_xname(sc->sc_dev));
   9091 		return true;
   9092 	}
   9093 
   9094 	return false;
   9095 }
   9096 
   9097 static inline bool
   9098 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9099 {
   9100 	struct wm_softc *sc = rxq->rxq_sc;
   9101 
   9102 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9103 		NQRXC_STATUS_DD)) {
   9104 		/* We have processed all of the receive descriptors. */
   9105 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9106 		return false;
   9107 	}
   9108 
   9109 	return true;
   9110 }
   9111 
   9112 static inline bool
   9113 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9114     uint16_t vlantag, struct mbuf *m)
   9115 {
   9116 
   9117 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9118 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9119 		vlan_set_tag(m, le16toh(vlantag));
   9120 	}
   9121 
   9122 	return true;
   9123 }
   9124 
   9125 static inline void
   9126 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9127     uint32_t errors, struct mbuf *m)
   9128 {
   9129 	struct wm_softc *sc = rxq->rxq_sc;
   9130 
   9131 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9132 		if (wm_rxdesc_is_set_status(sc, status,
   9133 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9134 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9135 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9136 			if (wm_rxdesc_is_set_error(sc, errors,
   9137 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9138 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9139 		}
   9140 		if (wm_rxdesc_is_set_status(sc, status,
   9141 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9142 			/*
   9143 			 * Note: we don't know if this was TCP or UDP,
   9144 			 * so we just set both bits, and expect the
   9145 			 * upper layers to deal.
   9146 			 */
   9147 			WM_Q_EVCNT_INCR(rxq, tusum);
   9148 			m->m_pkthdr.csum_flags |=
   9149 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9150 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9151 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9152 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9153 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9154 		}
   9155 	}
   9156 }
   9157 
   9158 /*
   9159  * wm_rxeof:
   9160  *
   9161  *	Helper; handle receive interrupts.
   9162  */
   9163 static bool
   9164 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9165 {
   9166 	struct wm_softc *sc = rxq->rxq_sc;
   9167 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9168 	struct wm_rxsoft *rxs;
   9169 	struct mbuf *m;
   9170 	int i, len;
   9171 	int count = 0;
   9172 	uint32_t status, errors;
   9173 	uint16_t vlantag;
   9174 	bool more = false;
   9175 
   9176 	KASSERT(mutex_owned(rxq->rxq_lock));
   9177 
   9178 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9179 		if (limit-- == 0) {
   9180 			rxq->rxq_ptr = i;
   9181 			more = true;
   9182 			DPRINTF(sc, WM_DEBUG_RX,
   9183 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9184 				device_xname(sc->sc_dev), i));
   9185 			break;
   9186 		}
   9187 
   9188 		rxs = &rxq->rxq_soft[i];
   9189 
   9190 		DPRINTF(sc, WM_DEBUG_RX,
   9191 		    ("%s: RX: checking descriptor %d\n",
   9192 			device_xname(sc->sc_dev), i));
   9193 		wm_cdrxsync(rxq, i,
   9194 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9195 
   9196 		status = wm_rxdesc_get_status(rxq, i);
   9197 		errors = wm_rxdesc_get_errors(rxq, i);
   9198 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9199 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9200 #ifdef WM_DEBUG
   9201 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9202 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9203 #endif
   9204 
   9205 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while holding
			 * rxq_lock, consistent with the counter
			 * increments.
			 */
   9210 			rxq->rxq_ptr = i;
   9211 			break;
   9212 		}
   9213 
   9214 		count++;
   9215 		if (__predict_false(rxq->rxq_discard)) {
   9216 			DPRINTF(sc, WM_DEBUG_RX,
   9217 			    ("%s: RX: discarding contents of descriptor %d\n",
   9218 				device_xname(sc->sc_dev), i));
   9219 			wm_init_rxdesc(rxq, i);
   9220 			if (wm_rxdesc_is_eop(rxq, status)) {
   9221 				/* Reset our state. */
   9222 				DPRINTF(sc, WM_DEBUG_RX,
   9223 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9224 					device_xname(sc->sc_dev)));
   9225 				rxq->rxq_discard = 0;
   9226 			}
   9227 			continue;
   9228 		}
   9229 
   9230 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9231 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9232 
   9233 		m = rxs->rxs_mbuf;
   9234 
   9235 		/*
   9236 		 * Add a new receive buffer to the ring, unless of
   9237 		 * course the length is zero. Treat the latter as a
   9238 		 * failed mapping.
   9239 		 */
   9240 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9241 			/*
   9242 			 * Failed, throw away what we've done so
   9243 			 * far, and discard the rest of the packet.
   9244 			 */
   9245 			if_statinc(ifp, if_ierrors);
   9246 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9247 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9248 			wm_init_rxdesc(rxq, i);
   9249 			if (!wm_rxdesc_is_eop(rxq, status))
   9250 				rxq->rxq_discard = 1;
   9251 			if (rxq->rxq_head != NULL)
   9252 				m_freem(rxq->rxq_head);
   9253 			WM_RXCHAIN_RESET(rxq);
   9254 			DPRINTF(sc, WM_DEBUG_RX,
   9255 			    ("%s: RX: Rx buffer allocation failed, "
   9256 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9257 				rxq->rxq_discard ? " (discard)" : ""));
   9258 			continue;
   9259 		}
   9260 
   9261 		m->m_len = len;
   9262 		rxq->rxq_len += len;
   9263 		DPRINTF(sc, WM_DEBUG_RX,
   9264 		    ("%s: RX: buffer at %p len %d\n",
   9265 			device_xname(sc->sc_dev), m->m_data, len));
   9266 
   9267 		/* If this is not the end of the packet, keep looking. */
   9268 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9269 			WM_RXCHAIN_LINK(rxq, m);
   9270 			DPRINTF(sc, WM_DEBUG_RX,
   9271 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9272 				device_xname(sc->sc_dev), rxq->rxq_len));
   9273 			continue;
   9274 		}
   9275 
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS (not all chips can be
		 * configured to strip it), so normally we need to trim
		 * it, except on the I35[04] and I21[01]: those chips have
		 * an erratum whereby the RCTL_SECRC bit in the RCTL
		 * register is always set, so we don't trim the FCS there.
		 * PCH2 and newer chips also do not include the FCS when a
		 * jumbo frame is used, to work around an erratum.
		 * We may need to adjust the length of the previous mbuf
		 * in the chain if the current mbuf is too short.
		 */
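		/*
		 * Illustrative example: if the final mbuf holds only 2 of
		 * the 4 FCS bytes, its m_len is zeroed and the remaining
		 * 2 bytes are trimmed from the previous mbuf in the chain.
		 */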
   9287 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9288 			if (m->m_len < ETHER_CRC_LEN) {
   9289 				rxq->rxq_tail->m_len
   9290 				    -= (ETHER_CRC_LEN - m->m_len);
   9291 				m->m_len = 0;
   9292 			} else
   9293 				m->m_len -= ETHER_CRC_LEN;
   9294 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9295 		} else
   9296 			len = rxq->rxq_len;
   9297 
   9298 		WM_RXCHAIN_LINK(rxq, m);
   9299 
   9300 		*rxq->rxq_tailp = NULL;
   9301 		m = rxq->rxq_head;
   9302 
   9303 		WM_RXCHAIN_RESET(rxq);
   9304 
   9305 		DPRINTF(sc, WM_DEBUG_RX,
   9306 		    ("%s: RX: have entire packet, len -> %d\n",
   9307 			device_xname(sc->sc_dev), len));
   9308 
   9309 		/* If an error occurred, update stats and drop the packet. */
   9310 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9311 			m_freem(m);
   9312 			continue;
   9313 		}
   9314 
   9315 		/* No errors.  Receive the packet. */
   9316 		m_set_rcvif(m, ifp);
   9317 		m->m_pkthdr.len = len;
		/*
		 * TODO
		 * The rsshash and rsstype should be saved in this mbuf.
		 */
   9322 		DPRINTF(sc, WM_DEBUG_RX,
   9323 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9324 			device_xname(sc->sc_dev), rsstype, rsshash));
   9325 
   9326 		/*
   9327 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9328 		 * for us.  Associate the tag with the packet.
   9329 		 */
   9330 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9331 			continue;
   9332 
   9333 		/* Set up checksum info for this packet. */
   9334 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
		/*
		 * Update the receive pointer while holding rxq_lock,
		 * consistent with the counter increments.
		 */
   9339 		rxq->rxq_ptr = i;
   9340 		rxq->rxq_packets++;
   9341 		rxq->rxq_bytes += len;
   9342 		mutex_exit(rxq->rxq_lock);
   9343 
   9344 		/* Pass it on. */
   9345 		if_percpuq_enqueue(sc->sc_ipq, m);
   9346 
   9347 		mutex_enter(rxq->rxq_lock);
   9348 
   9349 		if (rxq->rxq_stopping)
   9350 			break;
   9351 	}
   9352 
   9353 	if (count != 0)
   9354 		rnd_add_uint32(&sc->rnd_source, count);
   9355 
   9356 	DPRINTF(sc, WM_DEBUG_RX,
   9357 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9358 
   9359 	return more;
   9360 }
   9361 
   9362 /*
   9363  * wm_linkintr_gmii:
   9364  *
   9365  *	Helper; handle link interrupts for GMII.
   9366  */
   9367 static void
   9368 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9369 {
   9370 	device_t dev = sc->sc_dev;
   9371 	uint32_t status, reg;
   9372 	bool link;
   9373 	int rv;
   9374 
   9375 	KASSERT(WM_CORE_LOCKED(sc));
   9376 
   9377 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9378 		__func__));
   9379 
   9380 	if ((icr & ICR_LSC) == 0) {
   9381 		if (icr & ICR_RXSEQ)
   9382 			DPRINTF(sc, WM_DEBUG_LINK,
   9383 			    ("%s: LINK Receive sequence error\n",
   9384 				device_xname(dev)));
   9385 		return;
   9386 	}
   9387 
   9388 	/* Link status changed */
   9389 	status = CSR_READ(sc, WMREG_STATUS);
   9390 	link = status & STATUS_LU;
   9391 	if (link) {
   9392 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9393 			device_xname(dev),
   9394 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9395 		if (wm_phy_need_linkdown_discard(sc))
   9396 			wm_clear_linkdown_discard(sc);
   9397 	} else {
   9398 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9399 			device_xname(dev)));
   9400 		if (wm_phy_need_linkdown_discard(sc))
   9401 			wm_set_linkdown_discard(sc);
   9402 	}
   9403 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9404 		wm_gig_downshift_workaround_ich8lan(sc);
   9405 
   9406 	if ((sc->sc_type == WM_T_ICH8)
   9407 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9408 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9409 	}
   9410 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9411 		device_xname(dev)));
   9412 	mii_pollstat(&sc->sc_mii);
   9413 	if (sc->sc_type == WM_T_82543) {
   9414 		int miistatus, active;
   9415 
		/*
		 * With the 82543, we need to force the speed and duplex
		 * on the MAC to match the PHY's speed and duplex
		 * configuration.
		 */
   9421 		miistatus = sc->sc_mii.mii_media_status;
   9422 
   9423 		if (miistatus & IFM_ACTIVE) {
   9424 			active = sc->sc_mii.mii_media_active;
   9425 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9426 			switch (IFM_SUBTYPE(active)) {
   9427 			case IFM_10_T:
   9428 				sc->sc_ctrl |= CTRL_SPEED_10;
   9429 				break;
   9430 			case IFM_100_TX:
   9431 				sc->sc_ctrl |= CTRL_SPEED_100;
   9432 				break;
   9433 			case IFM_1000_T:
   9434 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9435 				break;
   9436 			default:
   9437 				/*
   9438 				 * Fiber?
    9439 				 * Should not enter here.
   9440 				 */
   9441 				device_printf(dev, "unknown media (%x)\n",
   9442 				    active);
   9443 				break;
   9444 			}
   9445 			if (active & IFM_FDX)
   9446 				sc->sc_ctrl |= CTRL_FD;
   9447 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9448 		}
   9449 	} else if (sc->sc_type == WM_T_PCH) {
   9450 		wm_k1_gig_workaround_hv(sc,
   9451 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9452 	}
   9453 
   9454 	/*
   9455 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9456 	 * aggressive resulting in many collisions. To avoid this, increase
   9457 	 * the IPG and reduce Rx latency in the PHY.
   9458 	 */
   9459 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9460 	    && link) {
   9461 		uint32_t tipg_reg;
   9462 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9463 		bool fdx;
   9464 		uint16_t emi_addr, emi_val;
   9465 
   9466 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9467 		tipg_reg &= ~TIPG_IPGT_MASK;
   9468 		fdx = status & STATUS_FD;
   9469 
   9470 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9471 			tipg_reg |= 0xff;
   9472 			/* Reduce Rx latency in analog PHY */
   9473 			emi_val = 0;
   9474 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9475 		    fdx && speed != STATUS_SPEED_1000) {
   9476 			tipg_reg |= 0xc;
   9477 			emi_val = 1;
   9478 		} else {
   9479 			/* Roll back the default values */
   9480 			tipg_reg |= 0x08;
   9481 			emi_val = 1;
   9482 		}
   9483 
   9484 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9485 
   9486 		rv = sc->phy.acquire(sc);
   9487 		if (rv)
   9488 			return;
   9489 
   9490 		if (sc->sc_type == WM_T_PCH2)
   9491 			emi_addr = I82579_RX_CONFIG;
   9492 		else
   9493 			emi_addr = I217_RX_CONFIG;
   9494 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9495 
   9496 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9497 			uint16_t phy_reg;
   9498 
   9499 			sc->phy.readreg_locked(dev, 2,
   9500 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9501 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9502 			if (speed == STATUS_SPEED_100
   9503 			    || speed == STATUS_SPEED_10)
   9504 				phy_reg |= 0x3e8;
   9505 			else
   9506 				phy_reg |= 0xfa;
   9507 			sc->phy.writereg_locked(dev, 2,
   9508 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9509 
   9510 			if (speed == STATUS_SPEED_1000) {
   9511 				sc->phy.readreg_locked(dev, 2,
   9512 				    HV_PM_CTRL, &phy_reg);
   9513 
   9514 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9515 
   9516 				sc->phy.writereg_locked(dev, 2,
   9517 				    HV_PM_CTRL, phy_reg);
   9518 			}
   9519 		}
   9520 		sc->phy.release(sc);
   9521 
   9522 		if (rv)
   9523 			return;
   9524 
   9525 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9526 			uint16_t data, ptr_gap;
   9527 
   9528 			if (speed == STATUS_SPEED_1000) {
   9529 				rv = sc->phy.acquire(sc);
   9530 				if (rv)
   9531 					return;
   9532 
   9533 				rv = sc->phy.readreg_locked(dev, 2,
   9534 				    I82579_UNKNOWN1, &data);
   9535 				if (rv) {
   9536 					sc->phy.release(sc);
   9537 					return;
   9538 				}
   9539 
   9540 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9541 				if (ptr_gap < 0x18) {
   9542 					data &= ~(0x3ff << 2);
   9543 					data |= (0x18 << 2);
   9544 					rv = sc->phy.writereg_locked(dev,
   9545 					    2, I82579_UNKNOWN1, data);
   9546 				}
   9547 				sc->phy.release(sc);
   9548 				if (rv)
   9549 					return;
   9550 			} else {
   9551 				rv = sc->phy.acquire(sc);
   9552 				if (rv)
   9553 					return;
   9554 
   9555 				rv = sc->phy.writereg_locked(dev, 2,
   9556 				    I82579_UNKNOWN1, 0xc023);
   9557 				sc->phy.release(sc);
   9558 				if (rv)
   9559 					return;
   9560 
   9561 			}
   9562 		}
   9563 	}
   9564 
   9565 	/*
   9566 	 * I217 Packet Loss issue:
   9567 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9568 	 * on power up.
   9569 	 * Set the Beacon Duration for I217 to 8 usec
   9570 	 */
   9571 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9572 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9573 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9574 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9575 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9576 	}
   9577 
   9578 	/* Work-around I218 hang issue */
   9579 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9580 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9581 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9582 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9583 		wm_k1_workaround_lpt_lp(sc, link);
   9584 
   9585 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9586 		/*
   9587 		 * Set platform power management values for Latency
   9588 		 * Tolerance Reporting (LTR)
   9589 		 */
   9590 		wm_platform_pm_pch_lpt(sc,
   9591 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9592 	}
   9593 
   9594 	/* Clear link partner's EEE ability */
   9595 	sc->eee_lp_ability = 0;
   9596 
   9597 	/* FEXTNVM6 K1-off workaround */
   9598 	if (sc->sc_type == WM_T_PCH_SPT) {
   9599 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9600 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9601 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9602 		else
   9603 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9604 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9605 	}
   9606 
   9607 	if (!link)
   9608 		return;
   9609 
   9610 	switch (sc->sc_type) {
   9611 	case WM_T_PCH2:
   9612 		wm_k1_workaround_lv(sc);
   9613 		/* FALLTHROUGH */
   9614 	case WM_T_PCH:
   9615 		if (sc->sc_phytype == WMPHY_82578)
   9616 			wm_link_stall_workaround_hv(sc);
   9617 		break;
   9618 	default:
   9619 		break;
   9620 	}
   9621 
   9622 	/* Enable/Disable EEE after link up */
   9623 	if (sc->sc_phytype > WMPHY_82579)
   9624 		wm_set_eee_pchlan(sc);
   9625 }
   9626 
   9627 /*
   9628  * wm_linkintr_tbi:
   9629  *
   9630  *	Helper; handle link interrupts for TBI mode.
   9631  */
   9632 static void
   9633 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9634 {
   9635 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9636 	uint32_t status;
   9637 
   9638 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9639 		__func__));
   9640 
   9641 	status = CSR_READ(sc, WMREG_STATUS);
   9642 	if (icr & ICR_LSC) {
   9643 		wm_check_for_link(sc);
   9644 		if (status & STATUS_LU) {
   9645 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9646 				device_xname(sc->sc_dev),
   9647 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9648 			/*
   9649 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9650 			 * so we should update sc->sc_ctrl
   9651 			 */
   9652 
   9653 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9654 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9655 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9656 			if (status & STATUS_FD)
   9657 				sc->sc_tctl |=
   9658 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9659 			else
   9660 				sc->sc_tctl |=
   9661 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9662 			if (sc->sc_ctrl & CTRL_TFCE)
   9663 				sc->sc_fcrtl |= FCRTL_XONE;
   9664 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9665 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9666 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9667 			sc->sc_tbi_linkup = 1;
   9668 			if_link_state_change(ifp, LINK_STATE_UP);
   9669 		} else {
   9670 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9671 				device_xname(sc->sc_dev)));
   9672 			sc->sc_tbi_linkup = 0;
   9673 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9674 		}
   9675 		/* Update LED */
   9676 		wm_tbi_serdes_set_linkled(sc);
   9677 	} else if (icr & ICR_RXSEQ)
   9678 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9679 			device_xname(sc->sc_dev)));
   9680 }
   9681 
   9682 /*
   9683  * wm_linkintr_serdes:
   9684  *
    9685  *	Helper; handle link interrupts for SERDES mode.
   9686  */
   9687 static void
   9688 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9689 {
   9690 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9691 	struct mii_data *mii = &sc->sc_mii;
   9692 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9693 	uint32_t pcs_adv, pcs_lpab, reg;
   9694 
   9695 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9696 		__func__));
   9697 
   9698 	if (icr & ICR_LSC) {
   9699 		/* Check PCS */
   9700 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9701 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9702 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9703 				device_xname(sc->sc_dev)));
   9704 			mii->mii_media_status |= IFM_ACTIVE;
   9705 			sc->sc_tbi_linkup = 1;
   9706 			if_link_state_change(ifp, LINK_STATE_UP);
   9707 		} else {
   9708 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9709 				device_xname(sc->sc_dev)));
   9710 			mii->mii_media_status |= IFM_NONE;
   9711 			sc->sc_tbi_linkup = 0;
   9712 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9713 			wm_tbi_serdes_set_linkled(sc);
   9714 			return;
   9715 		}
   9716 		mii->mii_media_active |= IFM_1000_SX;
   9717 		if ((reg & PCS_LSTS_FDX) != 0)
   9718 			mii->mii_media_active |= IFM_FDX;
   9719 		else
   9720 			mii->mii_media_active |= IFM_HDX;
   9721 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9722 			/* Check flow */
   9723 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9724 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9725 				DPRINTF(sc, WM_DEBUG_LINK,
   9726 				    ("XXX LINKOK but not ACOMP\n"));
   9727 				return;
   9728 			}
   9729 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9730 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9731 			DPRINTF(sc, WM_DEBUG_LINK,
   9732 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
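			/*
			 * The chain below resolves the negotiated pause mode
			 * in the usual IEEE 802.3 annex 28B fashion:
			 *
			 *  local SYM &&               lp SYM             -> TX+RX pause
			 * !local SYM && local ASYM &&  lp SYM && lp ASYM -> TX pause
			 *  local SYM && local ASYM && !lp SYM && lp ASYM -> RX pause
			 */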
   9733 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9734 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9735 				mii->mii_media_active |= IFM_FLOW
   9736 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9737 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9738 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9739 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9740 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9741 				mii->mii_media_active |= IFM_FLOW
   9742 				    | IFM_ETH_TXPAUSE;
   9743 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9744 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9745 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9746 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9747 				mii->mii_media_active |= IFM_FLOW
   9748 				    | IFM_ETH_RXPAUSE;
   9749 		}
   9750 		/* Update LED */
   9751 		wm_tbi_serdes_set_linkled(sc);
   9752 	} else
   9753 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9754 		    device_xname(sc->sc_dev)));
   9755 }
   9756 
   9757 /*
   9758  * wm_linkintr:
   9759  *
   9760  *	Helper; handle link interrupts.
   9761  */
   9762 static void
   9763 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9764 {
   9765 
   9766 	KASSERT(WM_CORE_LOCKED(sc));
   9767 
   9768 	if (sc->sc_flags & WM_F_HAS_MII)
   9769 		wm_linkintr_gmii(sc, icr);
   9770 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9771 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9772 		wm_linkintr_serdes(sc, icr);
   9773 	else
   9774 		wm_linkintr_tbi(sc, icr);
   9775 }
   9776 
   9777 
   9778 static inline void
   9779 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9780 {
   9781 
   9782 	if (wmq->wmq_txrx_use_workqueue)
   9783 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9784 	else
   9785 		softint_schedule(wmq->wmq_si);
   9786 }
   9787 
   9788 /*
   9789  * wm_intr_legacy:
   9790  *
   9791  *	Interrupt service routine for INTx and MSI.
   9792  */
   9793 static int
   9794 wm_intr_legacy(void *arg)
   9795 {
   9796 	struct wm_softc *sc = arg;
   9797 	struct wm_queue *wmq = &sc->sc_queue[0];
   9798 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9799 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9800 	uint32_t icr, rndval = 0;
   9801 	int handled = 0;
   9802 
   9803 	while (1 /* CONSTCOND */) {
   9804 		icr = CSR_READ(sc, WMREG_ICR);
   9805 		if ((icr & sc->sc_icr) == 0)
   9806 			break;
   9807 		if (handled == 0)
   9808 			DPRINTF(sc, WM_DEBUG_TX,
    9809 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9810 		if (rndval == 0)
   9811 			rndval = icr;
   9812 
   9813 		mutex_enter(rxq->rxq_lock);
   9814 
   9815 		if (rxq->rxq_stopping) {
   9816 			mutex_exit(rxq->rxq_lock);
   9817 			break;
   9818 		}
   9819 
   9820 		handled = 1;
   9821 
   9822 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9823 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9824 			DPRINTF(sc, WM_DEBUG_RX,
   9825 			    ("%s: RX: got Rx intr 0x%08x\n",
   9826 				device_xname(sc->sc_dev),
   9827 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9828 			WM_Q_EVCNT_INCR(rxq, intr);
   9829 		}
   9830 #endif
   9831 		/*
   9832 		 * wm_rxeof() does *not* call upper layer functions directly,
    9833 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9834 		 * So, we can call wm_rxeof() in interrupt context.
   9835 		 */
   9836 		wm_rxeof(rxq, UINT_MAX);
   9837 
   9838 		mutex_exit(rxq->rxq_lock);
   9839 		mutex_enter(txq->txq_lock);
   9840 
   9841 		if (txq->txq_stopping) {
   9842 			mutex_exit(txq->txq_lock);
   9843 			break;
   9844 		}
   9845 
   9846 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9847 		if (icr & ICR_TXDW) {
   9848 			DPRINTF(sc, WM_DEBUG_TX,
   9849 			    ("%s: TX: got TXDW interrupt\n",
   9850 				device_xname(sc->sc_dev)));
   9851 			WM_Q_EVCNT_INCR(txq, txdw);
   9852 		}
   9853 #endif
   9854 		wm_txeof(txq, UINT_MAX);
   9855 
   9856 		mutex_exit(txq->txq_lock);
   9857 		WM_CORE_LOCK(sc);
   9858 
   9859 		if (sc->sc_core_stopping) {
   9860 			WM_CORE_UNLOCK(sc);
   9861 			break;
   9862 		}
   9863 
   9864 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9865 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9866 			wm_linkintr(sc, icr);
   9867 		}
   9868 		if ((icr & ICR_GPI(0)) != 0)
   9869 			device_printf(sc->sc_dev, "got module interrupt\n");
   9870 
   9871 		WM_CORE_UNLOCK(sc);
   9872 
   9873 		if (icr & ICR_RXO) {
   9874 #if defined(WM_DEBUG)
   9875 			log(LOG_WARNING, "%s: Receive overrun\n",
   9876 			    device_xname(sc->sc_dev));
   9877 #endif /* defined(WM_DEBUG) */
   9878 		}
   9879 	}
   9880 
   9881 	rnd_add_uint32(&sc->rnd_source, rndval);
   9882 
   9883 	if (handled) {
   9884 		/* Try to get more packets going. */
   9885 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9886 		wm_sched_handle_queue(sc, wmq);
   9887 	}
   9888 
   9889 	return handled;
   9890 }
   9891 
   9892 static inline void
   9893 wm_txrxintr_disable(struct wm_queue *wmq)
   9894 {
   9895 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9896 
   9897 	if (sc->sc_type == WM_T_82574)
   9898 		CSR_WRITE(sc, WMREG_IMC,
   9899 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9900 	else if (sc->sc_type == WM_T_82575)
   9901 		CSR_WRITE(sc, WMREG_EIMC,
   9902 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9903 	else
   9904 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9905 }
   9906 
   9907 static inline void
   9908 wm_txrxintr_enable(struct wm_queue *wmq)
   9909 {
   9910 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9911 
   9912 	wm_itrs_calculate(sc, wmq);
   9913 
   9914 	/*
    9915 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9916 	 * here. There is no need to care which of RXQ(0) and RXQ(1) enables
    9917 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9918 	 * while its wm_handle_queue(wmq) is running.
   9919 	 */
   9920 	if (sc->sc_type == WM_T_82574)
   9921 		CSR_WRITE(sc, WMREG_IMS,
   9922 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9923 	else if (sc->sc_type == WM_T_82575)
   9924 		CSR_WRITE(sc, WMREG_EIMS,
   9925 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9926 	else
   9927 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9928 }
   9929 
   9930 static int
   9931 wm_txrxintr_msix(void *arg)
   9932 {
   9933 	struct wm_queue *wmq = arg;
   9934 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9935 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9936 	struct wm_softc *sc = txq->txq_sc;
   9937 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9938 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9939 	bool txmore;
   9940 	bool rxmore;
   9941 
   9942 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9943 
   9944 	DPRINTF(sc, WM_DEBUG_TX,
   9945 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9946 
   9947 	wm_txrxintr_disable(wmq);
   9948 
   9949 	mutex_enter(txq->txq_lock);
   9950 
   9951 	if (txq->txq_stopping) {
   9952 		mutex_exit(txq->txq_lock);
   9953 		return 0;
   9954 	}
   9955 
   9956 	WM_Q_EVCNT_INCR(txq, txdw);
   9957 	txmore = wm_txeof(txq, txlimit);
   9958 	/* wm_deferred start() is done in wm_handle_queue(). */
   9959 	mutex_exit(txq->txq_lock);
   9960 
   9961 	DPRINTF(sc, WM_DEBUG_RX,
   9962 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9963 	mutex_enter(rxq->rxq_lock);
   9964 
   9965 	if (rxq->rxq_stopping) {
   9966 		mutex_exit(rxq->rxq_lock);
   9967 		return 0;
   9968 	}
   9969 
   9970 	WM_Q_EVCNT_INCR(rxq, intr);
   9971 	rxmore = wm_rxeof(rxq, rxlimit);
   9972 	mutex_exit(rxq->rxq_lock);
   9973 
   9974 	wm_itrs_writereg(sc, wmq);
   9975 
   9976 	if (txmore || rxmore) {
   9977 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9978 		wm_sched_handle_queue(sc, wmq);
   9979 	} else
   9980 		wm_txrxintr_enable(wmq);
   9981 
   9982 	return 1;
   9983 }
   9984 
   9985 static void
   9986 wm_handle_queue(void *arg)
   9987 {
   9988 	struct wm_queue *wmq = arg;
   9989 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9990 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9991 	struct wm_softc *sc = txq->txq_sc;
   9992 	u_int txlimit = sc->sc_tx_process_limit;
   9993 	u_int rxlimit = sc->sc_rx_process_limit;
   9994 	bool txmore;
   9995 	bool rxmore;
   9996 
   9997 	mutex_enter(txq->txq_lock);
   9998 	if (txq->txq_stopping) {
   9999 		mutex_exit(txq->txq_lock);
   10000 		return;
   10001 	}
   10002 	txmore = wm_txeof(txq, txlimit);
   10003 	wm_deferred_start_locked(txq);
   10004 	mutex_exit(txq->txq_lock);
   10005 
   10006 	mutex_enter(rxq->rxq_lock);
   10007 	if (rxq->rxq_stopping) {
   10008 		mutex_exit(rxq->rxq_lock);
   10009 		return;
   10010 	}
   10011 	WM_Q_EVCNT_INCR(rxq, defer);
   10012 	rxmore = wm_rxeof(rxq, rxlimit);
   10013 	mutex_exit(rxq->rxq_lock);
   10014 
   10015 	if (txmore || rxmore) {
   10016 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10017 		wm_sched_handle_queue(sc, wmq);
   10018 	} else
   10019 		wm_txrxintr_enable(wmq);
   10020 }
   10021 
   10022 static void
   10023 wm_handle_queue_work(struct work *wk, void *context)
   10024 {
   10025 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10026 
   10027 	/*
    10028 	 * An "enqueued" flag is not required here.
   10029 	 */
   10030 	wm_handle_queue(wmq);
   10031 }
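
/*
 * A minimal sketch of how sc_queue_wq is assumed to be created at attach
 * time (the queue name and exact flags here are illustrative, not the
 * literal call site):
 *
 *	error = workqueue_create(&sc->sc_queue_wq, "wmtxrx",
 *	    wm_handle_queue_work, sc, PRI_SOFTNET, IPL_NET,
 *	    WORKQUEUE_MPSAFE | WORKQUEUE_PERCPU);
 */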
   10032 
   10033 /*
   10034  * wm_linkintr_msix:
   10035  *
   10036  *	Interrupt service routine for link status change for MSI-X.
   10037  */
   10038 static int
   10039 wm_linkintr_msix(void *arg)
   10040 {
   10041 	struct wm_softc *sc = arg;
   10042 	uint32_t reg;
    10043 	bool has_rxo = false;
   10044 
   10045 	reg = CSR_READ(sc, WMREG_ICR);
   10046 	WM_CORE_LOCK(sc);
   10047 	DPRINTF(sc, WM_DEBUG_LINK,
   10048 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10049 		device_xname(sc->sc_dev), reg));
   10050 
   10051 	if (sc->sc_core_stopping)
   10052 		goto out;
   10053 
   10054 	if ((reg & ICR_LSC) != 0) {
   10055 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10056 		wm_linkintr(sc, ICR_LSC);
   10057 	}
   10058 	if ((reg & ICR_GPI(0)) != 0)
   10059 		device_printf(sc->sc_dev, "got module interrupt\n");
   10060 
   10061 	/*
   10062 	 * XXX 82574 MSI-X mode workaround
   10063 	 *
    10064 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
    10065 	 * MSI-X vector and, furthermore, raises neither the ICR_RXQ(0) nor
    10066 	 * the ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    10067 	 * interrupts by writing WMREG_ICS to process received packets.
   10068 	 */
   10069 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10070 #if defined(WM_DEBUG)
   10071 		log(LOG_WARNING, "%s: Receive overrun\n",
   10072 		    device_xname(sc->sc_dev));
   10073 #endif /* defined(WM_DEBUG) */
   10074 
   10075 		has_rxo = true;
   10076 		/*
   10077 		 * The RXO interrupt is very high rate when receive traffic is
   10078 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
   10079 		 * interrupts. ICR_OTHER will be enabled at the end of
   10080 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   10081 		 * ICR_RXQ(1) interrupts.
   10082 		 */
   10083 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10084 
   10085 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10086 	}
   10087 
   10090 out:
   10091 	WM_CORE_UNLOCK(sc);
   10092 
   10093 	if (sc->sc_type == WM_T_82574) {
   10094 		if (!has_rxo)
   10095 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10096 		else
   10097 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10098 	} else if (sc->sc_type == WM_T_82575)
   10099 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10100 	else
   10101 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10102 
   10103 	return 1;
   10104 }
   10105 
   10106 /*
   10107  * Media related.
   10108  * GMII, SGMII, TBI (and SERDES)
   10109  */
   10110 
   10111 /* Common */
   10112 
   10113 /*
   10114  * wm_tbi_serdes_set_linkled:
   10115  *
   10116  *	Update the link LED on TBI and SERDES devices.
   10117  */
   10118 static void
   10119 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10120 {
   10121 
   10122 	if (sc->sc_tbi_linkup)
   10123 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10124 	else
   10125 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10126 
   10127 	/* 82540 or newer devices are active low */
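	/*
	 * For example, with link up on such a part, the bit set above is
	 * inverted back to 0 by the XOR below, driving the pin low (LED
	 * on); with link down the pin is driven high (LED off).
	 */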
   10128 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10129 
   10130 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10131 }
   10132 
   10133 /* GMII related */
   10134 
   10135 /*
   10136  * wm_gmii_reset:
   10137  *
   10138  *	Reset the PHY.
   10139  */
   10140 static void
   10141 wm_gmii_reset(struct wm_softc *sc)
   10142 {
   10143 	uint32_t reg;
   10144 	int rv;
   10145 
   10146 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10147 		device_xname(sc->sc_dev), __func__));
   10148 
   10149 	rv = sc->phy.acquire(sc);
   10150 	if (rv != 0) {
   10151 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10152 		    __func__);
   10153 		return;
   10154 	}
   10155 
   10156 	switch (sc->sc_type) {
   10157 	case WM_T_82542_2_0:
   10158 	case WM_T_82542_2_1:
   10159 		/* null */
   10160 		break;
   10161 	case WM_T_82543:
   10162 		/*
   10163 		 * With 82543, we need to force speed and duplex on the MAC
   10164 		 * equal to what the PHY speed and duplex configuration is.
   10165 		 * In addition, we need to perform a hardware reset on the PHY
   10166 		 * to take it out of reset.
   10167 		 */
   10168 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10169 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10170 
   10171 		/* The PHY reset pin is active-low. */
   10172 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10173 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10174 		    CTRL_EXT_SWDPIN(4));
   10175 		reg |= CTRL_EXT_SWDPIO(4);
   10176 
   10177 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10178 		CSR_WRITE_FLUSH(sc);
   10179 		delay(10*1000);
   10180 
   10181 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10182 		CSR_WRITE_FLUSH(sc);
   10183 		delay(150);
   10184 #if 0
   10185 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10186 #endif
   10187 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10188 		break;
   10189 	case WM_T_82544:	/* Reset 10000us */
   10190 	case WM_T_82540:
   10191 	case WM_T_82545:
   10192 	case WM_T_82545_3:
   10193 	case WM_T_82546:
   10194 	case WM_T_82546_3:
   10195 	case WM_T_82541:
   10196 	case WM_T_82541_2:
   10197 	case WM_T_82547:
   10198 	case WM_T_82547_2:
   10199 	case WM_T_82571:	/* Reset 100us */
   10200 	case WM_T_82572:
   10201 	case WM_T_82573:
   10202 	case WM_T_82574:
   10203 	case WM_T_82575:
   10204 	case WM_T_82576:
   10205 	case WM_T_82580:
   10206 	case WM_T_I350:
   10207 	case WM_T_I354:
   10208 	case WM_T_I210:
   10209 	case WM_T_I211:
   10210 	case WM_T_82583:
   10211 	case WM_T_80003:
   10212 		/* Generic reset */
   10213 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10214 		CSR_WRITE_FLUSH(sc);
   10215 		delay(20000);
   10216 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10217 		CSR_WRITE_FLUSH(sc);
   10218 		delay(20000);
   10219 
   10220 		if ((sc->sc_type == WM_T_82541)
   10221 		    || (sc->sc_type == WM_T_82541_2)
   10222 		    || (sc->sc_type == WM_T_82547)
   10223 		    || (sc->sc_type == WM_T_82547_2)) {
    10224 			/* Workarounds for IGP are done in igp_reset() */
   10225 			/* XXX add code to set LED after phy reset */
   10226 		}
   10227 		break;
   10228 	case WM_T_ICH8:
   10229 	case WM_T_ICH9:
   10230 	case WM_T_ICH10:
   10231 	case WM_T_PCH:
   10232 	case WM_T_PCH2:
   10233 	case WM_T_PCH_LPT:
   10234 	case WM_T_PCH_SPT:
   10235 	case WM_T_PCH_CNP:
   10236 		/* Generic reset */
   10237 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10238 		CSR_WRITE_FLUSH(sc);
   10239 		delay(100);
   10240 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10241 		CSR_WRITE_FLUSH(sc);
   10242 		delay(150);
   10243 		break;
   10244 	default:
   10245 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10246 		    __func__);
   10247 		break;
   10248 	}
   10249 
   10250 	sc->phy.release(sc);
   10251 
   10252 	/* get_cfg_done */
   10253 	wm_get_cfg_done(sc);
   10254 
   10255 	/* Extra setup */
   10256 	switch (sc->sc_type) {
   10257 	case WM_T_82542_2_0:
   10258 	case WM_T_82542_2_1:
   10259 	case WM_T_82543:
   10260 	case WM_T_82544:
   10261 	case WM_T_82540:
   10262 	case WM_T_82545:
   10263 	case WM_T_82545_3:
   10264 	case WM_T_82546:
   10265 	case WM_T_82546_3:
   10266 	case WM_T_82541_2:
   10267 	case WM_T_82547_2:
   10268 	case WM_T_82571:
   10269 	case WM_T_82572:
   10270 	case WM_T_82573:
   10271 	case WM_T_82574:
   10272 	case WM_T_82583:
   10273 	case WM_T_82575:
   10274 	case WM_T_82576:
   10275 	case WM_T_82580:
   10276 	case WM_T_I350:
   10277 	case WM_T_I354:
   10278 	case WM_T_I210:
   10279 	case WM_T_I211:
   10280 	case WM_T_80003:
   10281 		/* Null */
   10282 		break;
   10283 	case WM_T_82541:
   10284 	case WM_T_82547:
    10285 		/* XXX Actively configure the LED after PHY reset */
   10286 		break;
   10287 	case WM_T_ICH8:
   10288 	case WM_T_ICH9:
   10289 	case WM_T_ICH10:
   10290 	case WM_T_PCH:
   10291 	case WM_T_PCH2:
   10292 	case WM_T_PCH_LPT:
   10293 	case WM_T_PCH_SPT:
   10294 	case WM_T_PCH_CNP:
   10295 		wm_phy_post_reset(sc);
   10296 		break;
   10297 	default:
   10298 		panic("%s: unknown type\n", __func__);
   10299 		break;
   10300 	}
   10301 }
   10302 
   10303 /*
   10304  * Setup sc_phytype and mii_{read|write}reg.
   10305  *
    10306  *  To identify the PHY type, the correct read/write function must be
    10307  * selected, and selecting it requires the PCI ID or MAC type, without
    10308  * accessing any PHY register.
    10309  *
    10310  *  On the first call of this function, the PHY ID is not known yet, so
    10311  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    10312  * so the result might be incorrect.
    10313  *
    10314  *  On the second call, the PHY OUI and model are used to identify the
    10315  * PHY type. It might not be perfect because some entries may be missing
    10316  * from the comparison table, but it should be better than the first call.
    10317  *
    10318  *  If the newly detected result differs from the previous assumption,
    10319  * a diagnostic message is printed.
   10320  */
   10321 static void
   10322 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10323     uint16_t phy_model)
   10324 {
   10325 	device_t dev = sc->sc_dev;
   10326 	struct mii_data *mii = &sc->sc_mii;
   10327 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10328 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10329 	mii_readreg_t new_readreg;
   10330 	mii_writereg_t new_writereg;
   10331 	bool dodiag = true;
   10332 
   10333 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10334 		device_xname(sc->sc_dev), __func__));
   10335 
   10336 	/*
    10337 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
    10338 	 * incorrect, so don't print diagnostic output on the second call.
   10339 	 */
   10340 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10341 		dodiag = false;
   10342 
   10343 	if (mii->mii_readreg == NULL) {
   10344 		/*
   10345 		 *  This is the first call of this function. For ICH and PCH
   10346 		 * variants, it's difficult to determine the PHY access method
   10347 		 * by sc_type, so use the PCI product ID for some devices.
   10348 		 */
   10349 
   10350 		switch (sc->sc_pcidevid) {
   10351 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10352 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10353 			/* 82577 */
   10354 			new_phytype = WMPHY_82577;
   10355 			break;
   10356 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10357 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10358 			/* 82578 */
   10359 			new_phytype = WMPHY_82578;
   10360 			break;
   10361 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10362 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10363 			/* 82579 */
   10364 			new_phytype = WMPHY_82579;
   10365 			break;
   10366 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10367 		case PCI_PRODUCT_INTEL_82801I_BM:
   10368 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10369 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10370 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10371 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10372 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10373 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10374 			/* ICH8, 9, 10 with 82567 */
   10375 			new_phytype = WMPHY_BM;
   10376 			break;
   10377 		default:
   10378 			break;
   10379 		}
   10380 	} else {
   10381 		/* It's not the first call. Use PHY OUI and model */
   10382 		switch (phy_oui) {
   10383 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10384 			switch (phy_model) {
   10385 			case 0x0004: /* XXX */
   10386 				new_phytype = WMPHY_82578;
   10387 				break;
   10388 			default:
   10389 				break;
   10390 			}
   10391 			break;
   10392 		case MII_OUI_xxMARVELL:
   10393 			switch (phy_model) {
   10394 			case MII_MODEL_xxMARVELL_I210:
   10395 				new_phytype = WMPHY_I210;
   10396 				break;
   10397 			case MII_MODEL_xxMARVELL_E1011:
   10398 			case MII_MODEL_xxMARVELL_E1000_3:
   10399 			case MII_MODEL_xxMARVELL_E1000_5:
   10400 			case MII_MODEL_xxMARVELL_E1112:
   10401 				new_phytype = WMPHY_M88;
   10402 				break;
   10403 			case MII_MODEL_xxMARVELL_E1149:
   10404 				new_phytype = WMPHY_BM;
   10405 				break;
   10406 			case MII_MODEL_xxMARVELL_E1111:
   10407 			case MII_MODEL_xxMARVELL_I347:
   10408 			case MII_MODEL_xxMARVELL_E1512:
   10409 			case MII_MODEL_xxMARVELL_E1340M:
   10410 			case MII_MODEL_xxMARVELL_E1543:
   10411 				new_phytype = WMPHY_M88;
   10412 				break;
   10413 			case MII_MODEL_xxMARVELL_I82563:
   10414 				new_phytype = WMPHY_GG82563;
   10415 				break;
   10416 			default:
   10417 				break;
   10418 			}
   10419 			break;
   10420 		case MII_OUI_INTEL:
   10421 			switch (phy_model) {
   10422 			case MII_MODEL_INTEL_I82577:
   10423 				new_phytype = WMPHY_82577;
   10424 				break;
   10425 			case MII_MODEL_INTEL_I82579:
   10426 				new_phytype = WMPHY_82579;
   10427 				break;
   10428 			case MII_MODEL_INTEL_I217:
   10429 				new_phytype = WMPHY_I217;
   10430 				break;
   10431 			case MII_MODEL_INTEL_I82580:
   10432 				new_phytype = WMPHY_82580;
   10433 				break;
   10434 			case MII_MODEL_INTEL_I350:
   10435 				new_phytype = WMPHY_I350;
   10436 				break;
   10438 			default:
   10439 				break;
   10440 			}
   10441 			break;
   10442 		case MII_OUI_yyINTEL:
   10443 			switch (phy_model) {
   10444 			case MII_MODEL_yyINTEL_I82562G:
   10445 			case MII_MODEL_yyINTEL_I82562EM:
   10446 			case MII_MODEL_yyINTEL_I82562ET:
   10447 				new_phytype = WMPHY_IFE;
   10448 				break;
   10449 			case MII_MODEL_yyINTEL_IGP01E1000:
   10450 				new_phytype = WMPHY_IGP;
   10451 				break;
   10452 			case MII_MODEL_yyINTEL_I82566:
   10453 				new_phytype = WMPHY_IGP_3;
   10454 				break;
   10455 			default:
   10456 				break;
   10457 			}
   10458 			break;
   10459 		default:
   10460 			break;
   10461 		}
   10462 
   10463 		if (dodiag) {
   10464 			if (new_phytype == WMPHY_UNKNOWN)
   10465 				aprint_verbose_dev(dev,
   10466 				    "%s: Unknown PHY model. OUI=%06x, "
   10467 				    "model=%04x\n", __func__, phy_oui,
   10468 				    phy_model);
   10469 
   10470 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10471 			    && (sc->sc_phytype != new_phytype)) {
   10472 				aprint_error_dev(dev, "Previously assumed PHY "
    10473 				    "type(%u) was incorrect. PHY type from PHY "
   10474 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10475 			}
   10476 		}
   10477 	}
   10478 
   10479 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10480 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10481 		/* SGMII */
   10482 		new_readreg = wm_sgmii_readreg;
   10483 		new_writereg = wm_sgmii_writereg;
   10484 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10485 		/* BM2 (phyaddr == 1) */
   10486 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10487 		    && (new_phytype != WMPHY_BM)
   10488 		    && (new_phytype != WMPHY_UNKNOWN))
   10489 			doubt_phytype = new_phytype;
   10490 		new_phytype = WMPHY_BM;
   10491 		new_readreg = wm_gmii_bm_readreg;
   10492 		new_writereg = wm_gmii_bm_writereg;
   10493 	} else if (sc->sc_type >= WM_T_PCH) {
   10494 		/* All PCH* use _hv_ */
   10495 		new_readreg = wm_gmii_hv_readreg;
   10496 		new_writereg = wm_gmii_hv_writereg;
   10497 	} else if (sc->sc_type >= WM_T_ICH8) {
   10498 		/* non-82567 ICH8, 9 and 10 */
   10499 		new_readreg = wm_gmii_i82544_readreg;
   10500 		new_writereg = wm_gmii_i82544_writereg;
   10501 	} else if (sc->sc_type >= WM_T_80003) {
   10502 		/* 80003 */
   10503 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10504 		    && (new_phytype != WMPHY_GG82563)
   10505 		    && (new_phytype != WMPHY_UNKNOWN))
   10506 			doubt_phytype = new_phytype;
   10507 		new_phytype = WMPHY_GG82563;
   10508 		new_readreg = wm_gmii_i80003_readreg;
   10509 		new_writereg = wm_gmii_i80003_writereg;
   10510 	} else if (sc->sc_type >= WM_T_I210) {
   10511 		/* I210 and I211 */
   10512 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10513 		    && (new_phytype != WMPHY_I210)
   10514 		    && (new_phytype != WMPHY_UNKNOWN))
   10515 			doubt_phytype = new_phytype;
   10516 		new_phytype = WMPHY_I210;
   10517 		new_readreg = wm_gmii_gs40g_readreg;
   10518 		new_writereg = wm_gmii_gs40g_writereg;
   10519 	} else if (sc->sc_type >= WM_T_82580) {
   10520 		/* 82580, I350 and I354 */
   10521 		new_readreg = wm_gmii_82580_readreg;
   10522 		new_writereg = wm_gmii_82580_writereg;
   10523 	} else if (sc->sc_type >= WM_T_82544) {
   10524 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   10525 		new_readreg = wm_gmii_i82544_readreg;
   10526 		new_writereg = wm_gmii_i82544_writereg;
   10527 	} else {
   10528 		new_readreg = wm_gmii_i82543_readreg;
   10529 		new_writereg = wm_gmii_i82543_writereg;
   10530 	}
   10531 
   10532 	if (new_phytype == WMPHY_BM) {
   10533 		/* All BM use _bm_ */
   10534 		new_readreg = wm_gmii_bm_readreg;
   10535 		new_writereg = wm_gmii_bm_writereg;
   10536 	}
   10537 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10538 		/* All PCH* use _hv_ */
   10539 		new_readreg = wm_gmii_hv_readreg;
   10540 		new_writereg = wm_gmii_hv_writereg;
   10541 	}
   10542 
   10543 	/* Diag output */
   10544 	if (dodiag) {
   10545 		if (doubt_phytype != WMPHY_UNKNOWN)
   10546 			aprint_error_dev(dev, "Assumed new PHY type was "
   10547 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10548 			    new_phytype);
   10549 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10550 		    && (sc->sc_phytype != new_phytype))
   10551 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    10552 			    " was incorrect. New PHY type = %u\n",
   10553 			    sc->sc_phytype, new_phytype);
   10554 
   10555 		if ((mii->mii_readreg != NULL) &&
   10556 		    (new_phytype == WMPHY_UNKNOWN))
   10557 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10558 
   10559 		if ((mii->mii_readreg != NULL) &&
   10560 		    (mii->mii_readreg != new_readreg))
   10561 			aprint_error_dev(dev, "Previously assumed PHY "
   10562 			    "read/write function was incorrect.\n");
   10563 	}
   10564 
   10565 	/* Update now */
   10566 	sc->sc_phytype = new_phytype;
   10567 	mii->mii_readreg = new_readreg;
   10568 	mii->mii_writereg = new_writereg;
   10569 	if (new_readreg == wm_gmii_hv_readreg) {
   10570 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10571 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10572 	} else if (new_readreg == wm_sgmii_readreg) {
   10573 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10574 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10575 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10576 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10577 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10578 	}
   10579 }
   10580 
   10581 /*
   10582  * wm_get_phy_id_82575:
   10583  *
    10584  * Return the PHY ID, or -1 on failure.
   10585  */
   10586 static int
   10587 wm_get_phy_id_82575(struct wm_softc *sc)
   10588 {
   10589 	uint32_t reg;
   10590 	int phyid = -1;
   10591 
   10592 	/* XXX */
   10593 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10594 		return -1;
   10595 
   10596 	if (wm_sgmii_uses_mdio(sc)) {
   10597 		switch (sc->sc_type) {
   10598 		case WM_T_82575:
   10599 		case WM_T_82576:
   10600 			reg = CSR_READ(sc, WMREG_MDIC);
   10601 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10602 			break;
   10603 		case WM_T_82580:
   10604 		case WM_T_I350:
   10605 		case WM_T_I354:
   10606 		case WM_T_I210:
   10607 		case WM_T_I211:
   10608 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10609 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10610 			break;
   10611 		default:
   10612 			return -1;
   10613 		}
   10614 	}
   10615 
   10616 	return phyid;
   10617 }
   10618 
   10619 /*
   10620  * wm_gmii_mediainit:
   10621  *
   10622  *	Initialize media for use on 1000BASE-T devices.
   10623  */
   10624 static void
   10625 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10626 {
   10627 	device_t dev = sc->sc_dev;
   10628 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10629 	struct mii_data *mii = &sc->sc_mii;
   10630 
   10631 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10632 		device_xname(sc->sc_dev), __func__));
   10633 
   10634 	/* We have GMII. */
   10635 	sc->sc_flags |= WM_F_HAS_MII;
   10636 
   10637 	if (sc->sc_type == WM_T_80003)
    10638 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10639 	else
   10640 		sc->sc_tipg = TIPG_1000T_DFLT;
   10641 
   10642 	/*
   10643 	 * Let the chip set speed/duplex on its own based on
   10644 	 * signals from the PHY.
   10645 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10646 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10647 	 */
   10648 	sc->sc_ctrl |= CTRL_SLU;
   10649 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10650 
   10651 	/* Initialize our media structures and probe the GMII. */
   10652 	mii->mii_ifp = ifp;
   10653 
   10654 	mii->mii_statchg = wm_gmii_statchg;
   10655 
    10656 	/* Move PHY control from SMBus to PCIe */
   10657 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10658 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10659 	    || (sc->sc_type == WM_T_PCH_CNP))
   10660 		wm_init_phy_workarounds_pchlan(sc);
   10661 
   10662 	wm_gmii_reset(sc);
   10663 
   10664 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10665 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10666 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10667 
   10668 	/* Setup internal SGMII PHY for SFP */
   10669 	wm_sgmii_sfp_preconfig(sc);
   10670 
   10671 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10672 	    || (sc->sc_type == WM_T_82580)
   10673 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10674 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10675 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10676 			/* Attach only one port */
   10677 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10678 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10679 		} else {
   10680 			int i, id;
   10681 			uint32_t ctrl_ext;
   10682 
   10683 			id = wm_get_phy_id_82575(sc);
   10684 			if (id != -1) {
   10685 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10686 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10687 			}
   10688 			if ((id == -1)
   10689 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10690 				/* Power on sgmii phy if it is disabled */
   10691 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10692 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10693 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10694 				CSR_WRITE_FLUSH(sc);
   10695 				delay(300*1000); /* XXX too long */
   10696 
   10697 				/*
    10698 				 * Scan PHY addresses starting from 1.
    10699 				 *
    10700 				 * I2C access can fail with the I2C register's
    10701 				 * ERROR bit set, so suppress error messages
    10702 				 * while scanning.
   10703 				 */
   10704 				sc->phy.no_errprint = true;
   10705 				for (i = 1; i < 8; i++)
   10706 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10707 					    0xffffffff, i, MII_OFFSET_ANY,
   10708 					    MIIF_DOPAUSE);
   10709 				sc->phy.no_errprint = false;
   10710 
   10711 				/* Restore previous sfp cage power state */
   10712 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10713 			}
   10714 		}
   10715 	} else
   10716 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10717 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10718 
   10719 	/*
   10720 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10721 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10722 	 */
   10723 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10724 		|| (sc->sc_type == WM_T_PCH_SPT)
   10725 		|| (sc->sc_type == WM_T_PCH_CNP))
   10726 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10727 		wm_set_mdio_slow_mode_hv(sc);
   10728 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10729 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10730 	}
   10731 
   10732 	/*
   10733 	 * (For ICH8 variants)
   10734 	 * If PHY detection failed, use BM's r/w function and retry.
   10735 	 */
   10736 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10737 		/* if failed, retry with *_bm_* */
   10738 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10739 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10740 		    sc->sc_phytype);
   10741 		sc->sc_phytype = WMPHY_BM;
   10742 		mii->mii_readreg = wm_gmii_bm_readreg;
   10743 		mii->mii_writereg = wm_gmii_bm_writereg;
   10744 
   10745 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10746 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10747 	}
   10748 
   10749 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10750 		/* No PHY was found */
   10751 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10752 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10753 		sc->sc_phytype = WMPHY_NONE;
   10754 	} else {
   10755 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10756 
   10757 		/*
   10758 		 * PHY Found! Check PHY type again by the second call of
   10759 		 * wm_gmii_setup_phytype.
   10760 		 */
   10761 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10762 		    child->mii_mpd_model);
   10763 
   10764 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10765 	}
   10766 }
   10767 
   10768 /*
   10769  * wm_gmii_mediachange:	[ifmedia interface function]
   10770  *
   10771  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10772  */
   10773 static int
   10774 wm_gmii_mediachange(struct ifnet *ifp)
   10775 {
   10776 	struct wm_softc *sc = ifp->if_softc;
   10777 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10778 	uint32_t reg;
   10779 	int rc;
   10780 
   10781 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10782 		device_xname(sc->sc_dev), __func__));
   10783 	if ((ifp->if_flags & IFF_UP) == 0)
   10784 		return 0;
   10785 
   10786 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10787 	if ((sc->sc_type == WM_T_82580)
   10788 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10789 	    || (sc->sc_type == WM_T_I211)) {
   10790 		reg = CSR_READ(sc, WMREG_PHPM);
   10791 		reg &= ~PHPM_GO_LINK_D;
   10792 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10793 	}
   10794 
   10795 	/* Disable D0 LPLU. */
   10796 	wm_lplu_d0_disable(sc);
   10797 
   10798 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10799 	sc->sc_ctrl |= CTRL_SLU;
   10800 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10801 	    || (sc->sc_type > WM_T_82543)) {
   10802 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10803 	} else {
   10804 		sc->sc_ctrl &= ~CTRL_ASDE;
   10805 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10806 		if (ife->ifm_media & IFM_FDX)
   10807 			sc->sc_ctrl |= CTRL_FD;
   10808 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10809 		case IFM_10_T:
   10810 			sc->sc_ctrl |= CTRL_SPEED_10;
   10811 			break;
   10812 		case IFM_100_TX:
   10813 			sc->sc_ctrl |= CTRL_SPEED_100;
   10814 			break;
   10815 		case IFM_1000_T:
   10816 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10817 			break;
   10818 		case IFM_NONE:
   10819 			/* There is no specific setting for IFM_NONE */
   10820 			break;
   10821 		default:
   10822 			panic("wm_gmii_mediachange: bad media 0x%x",
   10823 			    ife->ifm_media);
   10824 		}
   10825 	}
   10826 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10827 	CSR_WRITE_FLUSH(sc);
   10828 
   10829 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10830 		wm_serdes_mediachange(ifp);
   10831 
   10832 	if (sc->sc_type <= WM_T_82543)
   10833 		wm_gmii_reset(sc);
   10834 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10835 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10836 		/* Allow time for the SFP cage to power up the PHY */
   10837 		delay(300 * 1000);
   10838 		wm_gmii_reset(sc);
   10839 	}
   10840 
   10841 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10842 		return 0;
   10843 	return rc;
   10844 }
   10845 
   10846 /*
   10847  * wm_gmii_mediastatus:	[ifmedia interface function]
   10848  *
   10849  *	Get the current interface media status on a 1000BASE-T device.
   10850  */
   10851 static void
   10852 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10853 {
   10854 	struct wm_softc *sc = ifp->if_softc;
   10855 
   10856 	ether_mediastatus(ifp, ifmr);
   10857 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10858 	    | sc->sc_flowflags;
   10859 }
   10860 
   10861 #define	MDI_IO		CTRL_SWDPIN(2)
   10862 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10863 #define	MDI_CLK		CTRL_SWDPIN(3)
   10864 
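/*
 * The bit-bang helpers below clock IEEE 802.3 clause 22 management
 * frames through the software-definable pins: a 32-bit preamble of
 * ones, a 2-bit start delimiter, a 2-bit opcode (read or write), a
 * 5-bit PHY address, a 5-bit register address, a 2-bit turnaround,
 * and 16 bits of data, MSB first.
 */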
   10865 static void
   10866 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10867 {
   10868 	uint32_t i, v;
   10869 
   10870 	v = CSR_READ(sc, WMREG_CTRL);
   10871 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10872 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10873 
   10874 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10875 		if (data & i)
   10876 			v |= MDI_IO;
   10877 		else
   10878 			v &= ~MDI_IO;
   10879 		CSR_WRITE(sc, WMREG_CTRL, v);
   10880 		CSR_WRITE_FLUSH(sc);
   10881 		delay(10);
   10882 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10883 		CSR_WRITE_FLUSH(sc);
   10884 		delay(10);
   10885 		CSR_WRITE(sc, WMREG_CTRL, v);
   10886 		CSR_WRITE_FLUSH(sc);
   10887 		delay(10);
   10888 	}
   10889 }
   10890 
   10891 static uint16_t
   10892 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10893 {
   10894 	uint32_t v, i;
   10895 	uint16_t data = 0;
   10896 
   10897 	v = CSR_READ(sc, WMREG_CTRL);
   10898 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10899 	v |= CTRL_SWDPIO(3);
   10900 
   10901 	CSR_WRITE(sc, WMREG_CTRL, v);
   10902 	CSR_WRITE_FLUSH(sc);
   10903 	delay(10);
   10904 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10905 	CSR_WRITE_FLUSH(sc);
   10906 	delay(10);
   10907 	CSR_WRITE(sc, WMREG_CTRL, v);
   10908 	CSR_WRITE_FLUSH(sc);
   10909 	delay(10);
   10910 
   10911 	for (i = 0; i < 16; i++) {
   10912 		data <<= 1;
   10913 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10914 		CSR_WRITE_FLUSH(sc);
   10915 		delay(10);
   10916 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10917 			data |= 1;
   10918 		CSR_WRITE(sc, WMREG_CTRL, v);
   10919 		CSR_WRITE_FLUSH(sc);
   10920 		delay(10);
   10921 	}
   10922 
   10923 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10924 	CSR_WRITE_FLUSH(sc);
   10925 	delay(10);
   10926 	CSR_WRITE(sc, WMREG_CTRL, v);
   10927 	CSR_WRITE_FLUSH(sc);
   10928 	delay(10);
   10929 
   10930 	return data;
   10931 }
   10932 
   10933 #undef MDI_IO
   10934 #undef MDI_DIR
   10935 #undef MDI_CLK
   10936 
   10937 /*
   10938  * wm_gmii_i82543_readreg:	[mii interface function]
   10939  *
   10940  *	Read a PHY register on the GMII (i82543 version).
   10941  */
   10942 static int
   10943 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10944 {
   10945 	struct wm_softc *sc = device_private(dev);
   10946 
   10947 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10948 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10949 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10950 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10951 
   10952 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10953 		device_xname(dev), phy, reg, *val));
   10954 
   10955 	return 0;
   10956 }
   10957 
   10958 /*
   10959  * wm_gmii_i82543_writereg:	[mii interface function]
   10960  *
   10961  *	Write a PHY register on the GMII (i82543 version).
   10962  */
   10963 static int
   10964 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10965 {
   10966 	struct wm_softc *sc = device_private(dev);
   10967 
   10968 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10969 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10970 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10971 	    (MII_COMMAND_START << 30), 32);
   10972 
   10973 	return 0;
   10974 }
   10975 
   10976 /*
   10977  * wm_gmii_mdic_readreg:	[mii interface function]
   10978  *
   10979  *	Read a PHY register on the GMII.
   10980  */
   10981 static int
   10982 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10983 {
   10984 	struct wm_softc *sc = device_private(dev);
   10985 	uint32_t mdic = 0;
   10986 	int i;
   10987 
   10988 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10989 	    && (reg > MII_ADDRMASK)) {
   10990 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10991 		    __func__, sc->sc_phytype, reg);
   10992 		reg &= MII_ADDRMASK;
   10993 	}
   10994 
   10995 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10996 	    MDIC_REGADD(reg));
   10997 
   10998 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10999 		delay(50);
   11000 		mdic = CSR_READ(sc, WMREG_MDIC);
   11001 		if (mdic & MDIC_READY)
   11002 			break;
   11003 	}
   11004 
   11005 	if ((mdic & MDIC_READY) == 0) {
   11006 		DPRINTF(sc, WM_DEBUG_GMII,
   11007 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11008 			device_xname(dev), phy, reg));
   11009 		return ETIMEDOUT;
   11010 	} else if (mdic & MDIC_E) {
   11011 		/* This is normal if no PHY is present. */
   11012 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   11013 			device_xname(sc->sc_dev), phy, reg));
   11014 		return -1;
   11015 	} else
   11016 		*val = MDIC_DATA(mdic);
   11017 
   11018 	/*
   11019 	 * Allow some time after each MDIC transaction to avoid
   11020 	 * reading duplicate data in the next MDIC transaction.
   11021 	 */
   11022 	if (sc->sc_type == WM_T_PCH2)
   11023 		delay(100);
   11024 
   11025 	return 0;
   11026 }
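
/*
 * A minimal usage sketch (illustrative; not a call site in this file):
 * reading the status register of the PHY at address 1 through MDIC.
 *
 *	uint16_t bmsr;
 *
 *	if (wm_gmii_mdic_readreg(sc->sc_dev, 1, MII_BMSR, &bmsr) == 0)
 *		link_up = (bmsr & BMSR_LINK) != 0;
 */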
   11027 
   11028 /*
   11029  * wm_gmii_mdic_writereg:	[mii interface function]
   11030  *
   11031  *	Write a PHY register on the GMII.
   11032  */
   11033 static int
   11034 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11035 {
   11036 	struct wm_softc *sc = device_private(dev);
   11037 	uint32_t mdic = 0;
   11038 	int i;
   11039 
   11040 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11041 	    && (reg > MII_ADDRMASK)) {
   11042 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11043 		    __func__, sc->sc_phytype, reg);
   11044 		reg &= MII_ADDRMASK;
   11045 	}
   11046 
   11047 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11048 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11049 
   11050 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11051 		delay(50);
   11052 		mdic = CSR_READ(sc, WMREG_MDIC);
   11053 		if (mdic & MDIC_READY)
   11054 			break;
   11055 	}
   11056 
   11057 	if ((mdic & MDIC_READY) == 0) {
   11058 		DPRINTF(sc, WM_DEBUG_GMII,
   11059 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11060 			device_xname(dev), phy, reg));
   11061 		return ETIMEDOUT;
   11062 	} else if (mdic & MDIC_E) {
   11063 		DPRINTF(sc, WM_DEBUG_GMII,
   11064 		    ("%s: MDIC write error: phy %d reg %d\n",
   11065 			device_xname(dev), phy, reg));
   11066 		return -1;
   11067 	}
   11068 
   11069 	/*
   11070 	 * Allow some time after each MDIC transaction to avoid
   11071 	 * reading duplicate data in the next MDIC transaction.
   11072 	 */
   11073 	if (sc->sc_type == WM_T_PCH2)
   11074 		delay(100);
   11075 
   11076 	return 0;
   11077 }
   11078 
   11079 /*
   11080  * wm_gmii_i82544_readreg:	[mii interface function]
   11081  *
   11082  *	Read a PHY register on the GMII.
   11083  */
   11084 static int
   11085 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11086 {
   11087 	struct wm_softc *sc = device_private(dev);
   11088 	int rv;
   11089 
   11090 	if (sc->phy.acquire(sc)) {
   11091 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11092 		return -1;
   11093 	}
   11094 
   11095 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11096 
   11097 	sc->phy.release(sc);
   11098 
   11099 	return rv;
   11100 }
   11101 
   11102 static int
   11103 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11104 {
   11105 	struct wm_softc *sc = device_private(dev);
   11106 	int rv;
   11107 
   11108 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11109 		switch (sc->sc_phytype) {
   11110 		case WMPHY_IGP:
   11111 		case WMPHY_IGP_2:
   11112 		case WMPHY_IGP_3:
   11113 			rv = wm_gmii_mdic_writereg(dev, phy,
   11114 			    IGPHY_PAGE_SELECT, reg);
   11115 			if (rv != 0)
   11116 				return rv;
   11117 			break;
   11118 		default:
   11119 #ifdef WM_DEBUG
   11120 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11121 			    __func__, sc->sc_phytype, reg);
   11122 #endif
   11123 			break;
   11124 		}
   11125 	}
   11126 
   11127 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11128 }
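
/*
 * Usage sketch (illustrative only; nothing in this file calls it this
 * way): for IGP-family PHYs the accessors above carry the page number
 * in the bits above MII_ADDRMASK, so e.g. page 2 register 21 would be
 * read as below.  The shift of 5 matches the 5-bit MII register field
 * (MII_ADDRMASK is 0x1f):
 *
 *	uint16_t data;
 *	int rv;
 *
 *	rv = wm_gmii_i82544_readreg(dev, 1, (2 << 5) | 21, &data);
 *
 * The page-select write above forwards the full value to
 * IGPHY_PAGE_SELECT and the final MDIC access masks it back down with
 * MII_ADDRMASK.
 */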
   11129 
   11130 /*
   11131  * wm_gmii_i82544_writereg:	[mii interface function]
   11132  *
   11133  *	Write a PHY register on the GMII.
   11134  */
   11135 static int
   11136 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11137 {
   11138 	struct wm_softc *sc = device_private(dev);
   11139 	int rv;
   11140 
   11141 	if (sc->phy.acquire(sc)) {
   11142 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11143 		return -1;
   11144 	}
   11145 
   11146 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11147 	sc->phy.release(sc);
   11148 
   11149 	return rv;
   11150 }
   11151 
   11152 static int
   11153 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11154 {
   11155 	struct wm_softc *sc = device_private(dev);
   11156 	int rv;
   11157 
   11158 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11159 		switch (sc->sc_phytype) {
   11160 		case WMPHY_IGP:
   11161 		case WMPHY_IGP_2:
   11162 		case WMPHY_IGP_3:
   11163 			rv = wm_gmii_mdic_writereg(dev, phy,
   11164 			    IGPHY_PAGE_SELECT, reg);
   11165 			if (rv != 0)
   11166 				return rv;
   11167 			break;
   11168 		default:
   11169 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11171 			    __func__, sc->sc_phytype, reg);
   11172 #endif
   11173 			break;
   11174 		}
   11175 	}
   11176 
   11177 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11178 }
   11179 
   11180 /*
   11181  * wm_gmii_i80003_readreg:	[mii interface function]
   11182  *
 *	Read a PHY register on the kumeran bus.
   11184  * This could be handled by the PHY layer if we didn't have to lock the
   11185  * resource ...
   11186  */
   11187 static int
   11188 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11189 {
   11190 	struct wm_softc *sc = device_private(dev);
   11191 	int page_select;
   11192 	uint16_t temp, temp2;
   11193 	int rv = 0;
   11194 
   11195 	if (phy != 1) /* Only one PHY on kumeran bus */
   11196 		return -1;
   11197 
   11198 	if (sc->phy.acquire(sc)) {
   11199 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11200 		return -1;
   11201 	}
   11202 
   11203 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11204 		page_select = GG82563_PHY_PAGE_SELECT;
   11205 	else {
   11206 		/*
   11207 		 * Use Alternative Page Select register to access registers
   11208 		 * 30 and 31.
   11209 		 */
   11210 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11211 	}
   11212 	temp = reg >> GG82563_PAGE_SHIFT;
   11213 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11214 		goto out;
   11215 
   11216 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11217 		/*
		 * Wait another 200us to work around a bug in the ready
		 * bit of the MDIC register.
   11220 		 */
   11221 		delay(200);
   11222 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11223 		if ((rv != 0) || (temp2 != temp)) {
   11224 			device_printf(dev, "%s failed\n", __func__);
   11225 			rv = -1;
   11226 			goto out;
   11227 		}
   11228 		delay(200);
   11229 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11230 		delay(200);
   11231 	} else
   11232 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11233 
   11234 out:
   11235 	sc->phy.release(sc);
   11236 	return rv;
   11237 }
   11238 
   11239 /*
   11240  * wm_gmii_i80003_writereg:	[mii interface function]
   11241  *
 *	Write a PHY register on the kumeran bus.
   11243  * This could be handled by the PHY layer if we didn't have to lock the
   11244  * resource ...
   11245  */
   11246 static int
   11247 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11248 {
   11249 	struct wm_softc *sc = device_private(dev);
   11250 	int page_select, rv;
   11251 	uint16_t temp, temp2;
   11252 
   11253 	if (phy != 1) /* Only one PHY on kumeran bus */
   11254 		return -1;
   11255 
   11256 	if (sc->phy.acquire(sc)) {
   11257 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11258 		return -1;
   11259 	}
   11260 
   11261 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11262 		page_select = GG82563_PHY_PAGE_SELECT;
   11263 	else {
   11264 		/*
   11265 		 * Use Alternative Page Select register to access registers
   11266 		 * 30 and 31.
   11267 		 */
   11268 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11269 	}
   11270 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11271 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11272 		goto out;
   11273 
   11274 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11275 		/*
		 * Wait another 200us to work around a bug in the ready
		 * bit of the MDIC register.
   11278 		 */
   11279 		delay(200);
   11280 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11281 		if ((rv != 0) || (temp2 != temp)) {
   11282 			device_printf(dev, "%s failed\n", __func__);
   11283 			rv = -1;
   11284 			goto out;
   11285 		}
   11286 		delay(200);
   11287 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11288 		delay(200);
   11289 	} else
   11290 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11291 
   11292 out:
   11293 	sc->phy.release(sc);
   11294 	return rv;
   11295 }
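
/*
 * Encoding sketch (illustrative only): the 80003 accessors above
 * likewise carry the GG82563 page in the bits above MII_ADDRMASK, so
 * a caller would pass register 16 on page 193 as
 * (193 << GG82563_PAGE_SHIFT) | 16.  Registers 30 and 31 are reached
 * through the alternative page-select register; the functions above
 * choose it automatically.
 */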
   11296 
   11297 /*
   11298  * wm_gmii_bm_readreg:	[mii interface function]
   11299  *
 *	Read a PHY register on the BM PHY.
   11301  * This could be handled by the PHY layer if we didn't have to lock the
   11302  * resource ...
   11303  */
   11304 static int
   11305 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11306 {
   11307 	struct wm_softc *sc = device_private(dev);
   11308 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11309 	int rv;
   11310 
   11311 	if (sc->phy.acquire(sc)) {
   11312 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11313 		return -1;
   11314 	}
   11315 
   11316 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11317 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11318 		    || (reg == 31)) ? 1 : phy;
   11319 	/* Page 800 works differently than the rest so it has its own func */
   11320 	if (page == BM_WUC_PAGE) {
   11321 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11322 		goto release;
   11323 	}
   11324 
   11325 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11326 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11327 		    && (sc->sc_type != WM_T_82583))
   11328 			rv = wm_gmii_mdic_writereg(dev, phy,
   11329 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11330 		else
   11331 			rv = wm_gmii_mdic_writereg(dev, phy,
   11332 			    BME1000_PHY_PAGE_SELECT, page);
   11333 		if (rv != 0)
   11334 			goto release;
   11335 	}
   11336 
   11337 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11338 
   11339 release:
   11340 	sc->phy.release(sc);
   11341 	return rv;
   11342 }
   11343 
   11344 /*
   11345  * wm_gmii_bm_writereg:	[mii interface function]
   11346  *
 *	Write a PHY register on the BM PHY.
   11348  * This could be handled by the PHY layer if we didn't have to lock the
   11349  * resource ...
   11350  */
   11351 static int
   11352 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11353 {
   11354 	struct wm_softc *sc = device_private(dev);
   11355 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11356 	int rv;
   11357 
   11358 	if (sc->phy.acquire(sc)) {
   11359 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11360 		return -1;
   11361 	}
   11362 
   11363 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11364 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11365 		    || (reg == 31)) ? 1 : phy;
   11366 	/* Page 800 works differently than the rest so it has its own func */
   11367 	if (page == BM_WUC_PAGE) {
   11368 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11369 		goto release;
   11370 	}
   11371 
   11372 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11373 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11374 		    && (sc->sc_type != WM_T_82583))
   11375 			rv = wm_gmii_mdic_writereg(dev, phy,
   11376 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11377 		else
   11378 			rv = wm_gmii_mdic_writereg(dev, phy,
   11379 			    BME1000_PHY_PAGE_SELECT, page);
   11380 		if (rv != 0)
   11381 			goto release;
   11382 	}
   11383 
   11384 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11385 
   11386 release:
   11387 	sc->phy.release(sc);
   11388 	return rv;
   11389 }
   11390 
   11391 /*
   11392  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11393  *  @dev: pointer to the HW structure
   11394  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11395  *
   11396  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11397  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11398  */
   11399 static int
   11400 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11401 {
   11402 #ifdef WM_DEBUG
   11403 	struct wm_softc *sc = device_private(dev);
   11404 #endif
   11405 	uint16_t temp;
   11406 	int rv;
   11407 
   11408 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11409 		device_xname(dev), __func__));
   11410 
   11411 	if (!phy_regp)
   11412 		return -1;
   11413 
   11414 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11415 
   11416 	/* Select Port Control Registers page */
   11417 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11418 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11419 	if (rv != 0)
   11420 		return rv;
   11421 
   11422 	/* Read WUCE and save it */
   11423 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11424 	if (rv != 0)
   11425 		return rv;
   11426 
   11427 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11428 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11429 	 */
   11430 	temp = *phy_regp;
   11431 	temp |= BM_WUC_ENABLE_BIT;
   11432 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11433 
   11434 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11435 		return rv;
   11436 
   11437 	/* Select Host Wakeup Registers page - caller now able to write
   11438 	 * registers on the Wakeup registers page
   11439 	 */
   11440 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11441 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11442 }
   11443 
   11444 /*
   11445  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11446  *  @dev: pointer to the HW structure
   11447  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11448  *
   11449  *  Restore BM_WUC_ENABLE_REG to its original value.
   11450  *
   11451  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11452  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11453  *  caller.
   11454  */
   11455 static int
   11456 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11457 {
   11458 #ifdef WM_DEBUG
   11459 	struct wm_softc *sc = device_private(dev);
   11460 #endif
   11461 
   11462 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11463 		device_xname(dev), __func__));
   11464 
   11465 	if (!phy_regp)
   11466 		return -1;
   11467 
   11468 	/* Select Port Control Registers page */
   11469 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11470 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11471 
   11472 	/* Restore 769.17 to its original value */
   11473 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11474 
   11475 	return 0;
   11476 }
   11477 
   11478 /*
   11479  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11480  *  @sc: pointer to the HW structure
   11481  *  @offset: register offset to be read or written
   11482  *  @val: pointer to the data to read or write
   11483  *  @rd: determines if operation is read or write
   11484  *  @page_set: BM_WUC_PAGE already set and access enabled
   11485  *
   11486  *  Read the PHY register at offset and store the retrieved information in
   11487  *  data, or write data to PHY register at offset.  Note the procedure to
 *  access the PHY wakeup registers is different from that for the other
 *  PHY registers. It works as follows:
   11490  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 for manageability)
   11492  *  3) Write the address using the address opcode (0x11)
   11493  *  4) Read or write the data using the data opcode (0x12)
   11494  *  5) Restore 769.17.2 to its original value
   11495  *
   11496  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11497  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11498  *
   11499  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11500  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11502  */
   11503 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11505 	bool page_set)
   11506 {
   11507 	struct wm_softc *sc = device_private(dev);
   11508 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11509 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11510 	uint16_t wuce;
   11511 	int rv = 0;
   11512 
   11513 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11514 		device_xname(dev), __func__));
   11515 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11516 	if ((sc->sc_type == WM_T_PCH)
   11517 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11518 		device_printf(dev,
   11519 		    "Attempting to access page %d while gig enabled.\n", page);
   11520 	}
   11521 
   11522 	if (!page_set) {
   11523 		/* Enable access to PHY wakeup registers */
   11524 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11525 		if (rv != 0) {
   11526 			device_printf(dev,
   11527 			    "%s: Could not enable PHY wakeup reg access\n",
   11528 			    __func__);
   11529 			return rv;
   11530 		}
   11531 	}
   11532 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11533 		device_xname(sc->sc_dev), __func__, page, regnum));
   11534 
   11535 	/*
	 * Steps 3) and 4): access the PHY wakeup register, as described
	 * in the comment above this function.
   11538 	 */
   11539 
   11540 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11541 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11542 	if (rv != 0)
   11543 		return rv;
   11544 
   11545 	if (rd) {
   11546 		/* Read the Wakeup register page value using opcode 0x12 */
   11547 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11548 	} else {
   11549 		/* Write the Wakeup register page value using opcode 0x12 */
   11550 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11551 	}
   11552 	if (rv != 0)
   11553 		return rv;
   11554 
   11555 	if (!page_set)
   11556 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11557 
   11558 	return rv;
   11559 }
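
/*
 * Usage sketch (illustrative only, with the PHY semaphore already
 * held): read wakeup register 1 on page 800.  BM_PHY_REG() is assumed
 * here to be a macro that packs a page and register number into one
 * offset, the inverse of BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() used
 * above:
 *
 *	uint16_t wuc;
 *	int rv;
 *
 *	rv = wm_access_phy_wakeup_reg_bm(dev,
 *	    BM_PHY_REG(BM_WUC_PAGE, 1), &wuc, true, false);
 */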
   11560 
   11561 /*
   11562  * wm_gmii_hv_readreg:	[mii interface function]
   11563  *
 *	Read a PHY register on the HV PHY (PCH and newer).
   11565  * This could be handled by the PHY layer if we didn't have to lock the
   11566  * resource ...
   11567  */
   11568 static int
   11569 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11570 {
   11571 	struct wm_softc *sc = device_private(dev);
   11572 	int rv;
   11573 
   11574 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11575 		device_xname(dev), __func__));
   11576 	if (sc->phy.acquire(sc)) {
   11577 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11578 		return -1;
   11579 	}
   11580 
   11581 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11582 	sc->phy.release(sc);
   11583 	return rv;
   11584 }
   11585 
   11586 static int
   11587 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11588 {
   11589 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11590 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11591 	int rv;
   11592 
   11593 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11594 
   11595 	/* Page 800 works differently than the rest so it has its own func */
   11596 	if (page == BM_WUC_PAGE)
   11597 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11598 
   11599 	/*
	 * Pages below 768 work differently from the rest and are not
	 * handled here.
   11602 	 */
   11603 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11604 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11605 		return -1;
   11606 	}
   11607 
   11608 	/*
   11609 	 * XXX I21[789] documents say that the SMBus Address register is at
   11610 	 * PHY address 01, Page 0 (not 768), Register 26.
   11611 	 */
   11612 	if (page == HV_INTC_FC_PAGE_START)
   11613 		page = 0;
   11614 
   11615 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11616 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11617 		    page << BME1000_PAGE_SHIFT);
   11618 		if (rv != 0)
   11619 			return rv;
   11620 	}
   11621 
   11622 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11623 }
   11624 
   11625 /*
   11626  * wm_gmii_hv_writereg:	[mii interface function]
   11627  *
 *	Write a PHY register on the HV PHY (PCH and newer).
   11629  * This could be handled by the PHY layer if we didn't have to lock the
   11630  * resource ...
   11631  */
   11632 static int
   11633 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11634 {
   11635 	struct wm_softc *sc = device_private(dev);
   11636 	int rv;
   11637 
   11638 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11639 		device_xname(dev), __func__));
   11640 
   11641 	if (sc->phy.acquire(sc)) {
   11642 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11643 		return -1;
   11644 	}
   11645 
   11646 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11647 	sc->phy.release(sc);
   11648 
   11649 	return rv;
   11650 }
   11651 
   11652 static int
   11653 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11654 {
   11655 	struct wm_softc *sc = device_private(dev);
   11656 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11657 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11658 	int rv;
   11659 
   11660 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11661 
   11662 	/* Page 800 works differently than the rest so it has its own func */
   11663 	if (page == BM_WUC_PAGE)
   11664 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11665 		    false);
   11666 
   11667 	/*
	 * Pages below 768 work differently from the rest and are not
	 * handled here.
   11670 	 */
   11671 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11672 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11673 		return -1;
   11674 	}
   11675 
   11676 	{
   11677 		/*
   11678 		 * XXX I21[789] documents say that the SMBus Address register
   11679 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11680 		 */
   11681 		if (page == HV_INTC_FC_PAGE_START)
   11682 			page = 0;
   11683 
   11684 		/*
   11685 		 * XXX Workaround MDIO accesses being disabled after entering
   11686 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11687 		 * register is set)
   11688 		 */
   11689 		if (sc->sc_phytype == WMPHY_82578) {
   11690 			struct mii_softc *child;
   11691 
   11692 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11693 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11694 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11695 			    && ((val & (1 << 11)) != 0)) {
   11696 				device_printf(dev, "XXX need workaround\n");
   11697 			}
   11698 		}
   11699 
   11700 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11701 			rv = wm_gmii_mdic_writereg(dev, 1,
   11702 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11703 			if (rv != 0)
   11704 				return rv;
   11705 		}
   11706 	}
   11707 
   11708 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11709 }
   11710 
   11711 /*
   11712  * wm_gmii_82580_readreg:	[mii interface function]
   11713  *
   11714  *	Read a PHY register on the 82580 and I350.
   11715  * This could be handled by the PHY layer if we didn't have to lock the
   11716  * resource ...
   11717  */
   11718 static int
   11719 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11720 {
   11721 	struct wm_softc *sc = device_private(dev);
   11722 	int rv;
   11723 
   11724 	if (sc->phy.acquire(sc) != 0) {
   11725 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11726 		return -1;
   11727 	}
   11728 
   11729 #ifdef DIAGNOSTIC
   11730 	if (reg > MII_ADDRMASK) {
   11731 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11732 		    __func__, sc->sc_phytype, reg);
   11733 		reg &= MII_ADDRMASK;
   11734 	}
   11735 #endif
   11736 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11737 
   11738 	sc->phy.release(sc);
   11739 	return rv;
   11740 }
   11741 
   11742 /*
   11743  * wm_gmii_82580_writereg:	[mii interface function]
   11744  *
   11745  *	Write a PHY register on the 82580 and I350.
   11746  * This could be handled by the PHY layer if we didn't have to lock the
   11747  * resource ...
   11748  */
   11749 static int
   11750 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11751 {
   11752 	struct wm_softc *sc = device_private(dev);
   11753 	int rv;
   11754 
   11755 	if (sc->phy.acquire(sc) != 0) {
   11756 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11757 		return -1;
   11758 	}
   11759 
   11760 #ifdef DIAGNOSTIC
   11761 	if (reg > MII_ADDRMASK) {
   11762 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11763 		    __func__, sc->sc_phytype, reg);
   11764 		reg &= MII_ADDRMASK;
   11765 	}
   11766 #endif
   11767 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11768 
   11769 	sc->phy.release(sc);
   11770 	return rv;
   11771 }
   11772 
   11773 /*
   11774  * wm_gmii_gs40g_readreg:	[mii interface function]
   11775  *
 *	Read a PHY register on the I210 and I211.
   11777  * This could be handled by the PHY layer if we didn't have to lock the
   11778  * resource ...
   11779  */
   11780 static int
   11781 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11782 {
   11783 	struct wm_softc *sc = device_private(dev);
   11784 	int page, offset;
   11785 	int rv;
   11786 
   11787 	/* Acquire semaphore */
   11788 	if (sc->phy.acquire(sc)) {
   11789 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11790 		return -1;
   11791 	}
   11792 
   11793 	/* Page select */
   11794 	page = reg >> GS40G_PAGE_SHIFT;
   11795 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11796 	if (rv != 0)
   11797 		goto release;
   11798 
   11799 	/* Read reg */
   11800 	offset = reg & GS40G_OFFSET_MASK;
   11801 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11802 
   11803 release:
   11804 	sc->phy.release(sc);
   11805 	return rv;
   11806 }
   11807 
   11808 /*
   11809  * wm_gmii_gs40g_writereg:	[mii interface function]
   11810  *
   11811  *	Write a PHY register on the I210 and I211.
   11812  * This could be handled by the PHY layer if we didn't have to lock the
   11813  * resource ...
   11814  */
   11815 static int
   11816 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11817 {
   11818 	struct wm_softc *sc = device_private(dev);
   11819 	uint16_t page;
   11820 	int offset, rv;
   11821 
   11822 	/* Acquire semaphore */
   11823 	if (sc->phy.acquire(sc)) {
   11824 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11825 		return -1;
   11826 	}
   11827 
   11828 	/* Page select */
   11829 	page = reg >> GS40G_PAGE_SHIFT;
   11830 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11831 	if (rv != 0)
   11832 		goto release;
   11833 
   11834 	/* Write reg */
   11835 	offset = reg & GS40G_OFFSET_MASK;
   11836 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11837 
   11838 release:
   11839 	/* Release semaphore */
   11840 	sc->phy.release(sc);
   11841 	return rv;
   11842 }
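
/*
 * Encoding sketch (illustrative only): GS40G register numbers carry
 * the page above GS40G_PAGE_SHIFT, so a caller addresses page 0
 * register 4 as (0 << GS40G_PAGE_SHIFT) | 4; the accessors above then
 * split that back into the page-select write and the masked offset
 * access.
 */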
   11843 
   11844 /*
   11845  * wm_gmii_statchg:	[mii interface function]
   11846  *
   11847  *	Callback from MII layer when media changes.
   11848  */
   11849 static void
   11850 wm_gmii_statchg(struct ifnet *ifp)
   11851 {
   11852 	struct wm_softc *sc = ifp->if_softc;
   11853 	struct mii_data *mii = &sc->sc_mii;
   11854 
   11855 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11856 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11857 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11858 
   11859 	/* Get flow control negotiation result. */
   11860 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11861 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11862 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11863 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11864 	}
   11865 
   11866 	if (sc->sc_flowflags & IFM_FLOW) {
   11867 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11868 			sc->sc_ctrl |= CTRL_TFCE;
   11869 			sc->sc_fcrtl |= FCRTL_XONE;
   11870 		}
   11871 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11872 			sc->sc_ctrl |= CTRL_RFCE;
   11873 	}
   11874 
   11875 	if (mii->mii_media_active & IFM_FDX) {
   11876 		DPRINTF(sc, WM_DEBUG_LINK,
   11877 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11878 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11879 	} else {
   11880 		DPRINTF(sc, WM_DEBUG_LINK,
   11881 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11882 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11883 	}
   11884 
   11885 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11886 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11887 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11888 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11889 	if (sc->sc_type == WM_T_80003) {
   11890 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11891 		case IFM_1000_T:
   11892 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11893 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11895 			break;
   11896 		default:
   11897 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11898 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11900 			break;
   11901 		}
   11902 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11903 	}
   11904 }
   11905 
   11906 /* kumeran related (80003, ICH* and PCH*) */
   11907 
   11908 /*
   11909  * wm_kmrn_readreg:
   11910  *
   11911  *	Read a kumeran register
   11912  */
   11913 static int
   11914 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11915 {
   11916 	int rv;
   11917 
   11918 	if (sc->sc_type == WM_T_80003)
   11919 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11920 	else
   11921 		rv = sc->phy.acquire(sc);
   11922 	if (rv != 0) {
   11923 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11924 		    __func__);
   11925 		return rv;
   11926 	}
   11927 
   11928 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11929 
   11930 	if (sc->sc_type == WM_T_80003)
   11931 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11932 	else
   11933 		sc->phy.release(sc);
   11934 
   11935 	return rv;
   11936 }
   11937 
   11938 static int
   11939 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11940 {
   11941 
   11942 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11943 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11944 	    KUMCTRLSTA_REN);
   11945 	CSR_WRITE_FLUSH(sc);
   11946 	delay(2);
   11947 
   11948 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11949 
   11950 	return 0;
   11951 }
   11952 
   11953 /*
   11954  * wm_kmrn_writereg:
   11955  *
   11956  *	Write a kumeran register
   11957  */
   11958 static int
   11959 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11960 {
   11961 	int rv;
   11962 
   11963 	if (sc->sc_type == WM_T_80003)
   11964 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11965 	else
   11966 		rv = sc->phy.acquire(sc);
   11967 	if (rv != 0) {
   11968 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11969 		    __func__);
   11970 		return rv;
   11971 	}
   11972 
   11973 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11974 
   11975 	if (sc->sc_type == WM_T_80003)
   11976 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11977 	else
   11978 		sc->phy.release(sc);
   11979 
   11980 	return rv;
   11981 }
   11982 
   11983 static int
   11984 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11985 {
   11986 
   11987 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11988 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11989 
   11990 	return 0;
   11991 }
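
/*
 * Usage sketch (illustrative only): writing a kumeran register through
 * the locking wrapper above, as wm_gmii_statchg() does:
 *
 *	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
 *	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
 *
 * A read-modify-write would pair wm_kmrn_readreg() with
 * wm_kmrn_writereg() the same way when the caller does not hold the
 * semaphore; the locked variants are for callers that already do.
 */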
   11992 
   11993 /*
   11994  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11995  * This access method is different from IEEE MMD.
   11996  */
   11997 static int
   11998 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11999 {
   12000 	struct wm_softc *sc = device_private(dev);
   12001 	int rv;
   12002 
   12003 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12004 	if (rv != 0)
   12005 		return rv;
   12006 
   12007 	if (rd)
   12008 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12009 	else
   12010 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12011 	return rv;
   12012 }
   12013 
   12014 static int
   12015 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12016 {
   12017 
   12018 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12019 }
   12020 
   12021 static int
   12022 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12023 {
   12024 
   12025 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12026 }
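
/*
 * Usage sketch (illustrative only, semaphore already held): EMI access
 * is an address/data pair behind I82579_EMI_ADDR and I82579_EMI_DATA,
 * so reading any EMI offset looks the same:
 *
 *	uint16_t data;
 *	int rv;
 *
 *	rv = wm_read_emi_reg_locked(dev, emi_offset, &data);
 *
 * where emi_offset is whichever EMI register constant the caller
 * needs (emi_offset is a placeholder here, not a define in this
 * driver).
 */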
   12027 
   12028 /* SGMII related */
   12029 
   12030 /*
   12031  * wm_sgmii_uses_mdio
   12032  *
   12033  * Check whether the transaction is to the internal PHY or the external
   12034  * MDIO interface. Return true if it's MDIO.
   12035  */
   12036 static bool
   12037 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12038 {
   12039 	uint32_t reg;
   12040 	bool ismdio = false;
   12041 
   12042 	switch (sc->sc_type) {
   12043 	case WM_T_82575:
   12044 	case WM_T_82576:
   12045 		reg = CSR_READ(sc, WMREG_MDIC);
   12046 		ismdio = ((reg & MDIC_DEST) != 0);
   12047 		break;
   12048 	case WM_T_82580:
   12049 	case WM_T_I350:
   12050 	case WM_T_I354:
   12051 	case WM_T_I210:
   12052 	case WM_T_I211:
   12053 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12054 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12055 		break;
   12056 	default:
   12057 		break;
   12058 	}
   12059 
   12060 	return ismdio;
   12061 }
   12062 
   12063 /* Setup internal SGMII PHY for SFP */
   12064 static void
   12065 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12066 {
   12067 	uint16_t id1, id2, phyreg;
   12068 	int i, rv;
   12069 
   12070 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12071 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12072 		return;
   12073 
   12074 	for (i = 0; i < MII_NPHY; i++) {
   12075 		sc->phy.no_errprint = true;
   12076 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12077 		if (rv != 0)
   12078 			continue;
   12079 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12080 		if (rv != 0)
   12081 			continue;
   12082 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12083 			continue;
   12084 		sc->phy.no_errprint = false;
   12085 
   12086 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12087 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12088 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12089 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12090 		break;
	}
}
   12094 
   12095 /*
   12096  * wm_sgmii_readreg:	[mii interface function]
   12097  *
   12098  *	Read a PHY register on the SGMII
   12099  * This could be handled by the PHY layer if we didn't have to lock the
   12100  * resource ...
   12101  */
   12102 static int
   12103 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12104 {
   12105 	struct wm_softc *sc = device_private(dev);
   12106 	int rv;
   12107 
   12108 	if (sc->phy.acquire(sc)) {
   12109 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12110 		return -1;
   12111 	}
   12112 
   12113 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12114 
   12115 	sc->phy.release(sc);
   12116 	return rv;
   12117 }
   12118 
   12119 static int
   12120 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12121 {
   12122 	struct wm_softc *sc = device_private(dev);
   12123 	uint32_t i2ccmd;
   12124 	int i, rv = 0;
   12125 
   12126 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12127 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12128 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12129 
   12130 	/* Poll the ready bit */
   12131 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12132 		delay(50);
   12133 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12134 		if (i2ccmd & I2CCMD_READY)
   12135 			break;
   12136 	}
   12137 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12138 		device_printf(dev, "I2CCMD Read did not complete\n");
   12139 		rv = ETIMEDOUT;
   12140 	}
   12141 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12142 		if (!sc->phy.no_errprint)
   12143 			device_printf(dev, "I2CCMD Error bit set\n");
   12144 		rv = EIO;
   12145 	}
   12146 
   12147 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12148 
   12149 	return rv;
   12150 }
   12151 
   12152 /*
   12153  * wm_sgmii_writereg:	[mii interface function]
   12154  *
   12155  *	Write a PHY register on the SGMII.
   12156  * This could be handled by the PHY layer if we didn't have to lock the
   12157  * resource ...
   12158  */
   12159 static int
   12160 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12161 {
   12162 	struct wm_softc *sc = device_private(dev);
   12163 	int rv;
   12164 
   12165 	if (sc->phy.acquire(sc) != 0) {
   12166 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12167 		return -1;
   12168 	}
   12169 
   12170 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12171 
   12172 	sc->phy.release(sc);
   12173 
   12174 	return rv;
   12175 }
   12176 
   12177 static int
   12178 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12179 {
   12180 	struct wm_softc *sc = device_private(dev);
   12181 	uint32_t i2ccmd;
   12182 	uint16_t swapdata;
   12183 	int rv = 0;
   12184 	int i;
   12185 
   12186 	/* Swap the data bytes for the I2C interface */
   12187 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12188 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12189 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12190 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12191 
   12192 	/* Poll the ready bit */
   12193 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12194 		delay(50);
   12195 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12196 		if (i2ccmd & I2CCMD_READY)
   12197 			break;
   12198 	}
   12199 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12200 		device_printf(dev, "I2CCMD Write did not complete\n");
   12201 		rv = ETIMEDOUT;
   12202 	}
   12203 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12204 		device_printf(dev, "I2CCMD Error bit set\n");
   12205 		rv = EIO;
   12206 	}
   12207 
   12208 	return rv;
   12209 }
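
/*
 * Byte-order note: the I2CCMD data field is big-endian on the wire,
 * so a 16-bit value 0x1234 is swapped to 0x3412 before the write
 * above, and wm_sgmii_readreg_locked() swaps it back the same way
 * after a read.
 */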
   12210 
   12211 /* TBI related */
   12212 
   12213 static bool
   12214 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12215 {
   12216 	bool sig;
   12217 
   12218 	sig = ctrl & CTRL_SWDPIN(1);
   12219 
   12220 	/*
   12221 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12222 	 * detect a signal, 1 if they don't.
   12223 	 */
   12224 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12225 		sig = !sig;
   12226 
   12227 	return sig;
   12228 }
   12229 
   12230 /*
   12231  * wm_tbi_mediainit:
   12232  *
   12233  *	Initialize media for use on 1000BASE-X devices.
   12234  */
   12235 static void
   12236 wm_tbi_mediainit(struct wm_softc *sc)
   12237 {
   12238 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12239 	const char *sep = "";
   12240 
   12241 	if (sc->sc_type < WM_T_82543)
   12242 		sc->sc_tipg = TIPG_WM_DFLT;
   12243 	else
   12244 		sc->sc_tipg = TIPG_LG_DFLT;
   12245 
   12246 	sc->sc_tbi_serdes_anegticks = 5;
   12247 
   12248 	/* Initialize our media structures */
   12249 	sc->sc_mii.mii_ifp = ifp;
   12250 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12251 
   12252 	ifp->if_baudrate = IF_Gbps(1);
   12253 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12254 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12255 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12256 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12257 		    sc->sc_core_lock);
   12258 	} else {
   12259 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12260 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12261 	}
   12262 
   12263 	/*
   12264 	 * SWD Pins:
   12265 	 *
   12266 	 *	0 = Link LED (output)
   12267 	 *	1 = Loss Of Signal (input)
   12268 	 */
   12269 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12270 
   12271 	/* XXX Perhaps this is only for TBI */
   12272 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12273 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12274 
   12275 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12276 		sc->sc_ctrl &= ~CTRL_LRST;
   12277 
   12278 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12279 
   12280 #define	ADD(ss, mm, dd)							\
   12281 do {									\
   12282 	aprint_normal("%s%s", sep, ss);					\
   12283 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12284 	sep = ", ";							\
   12285 } while (/*CONSTCOND*/0)
   12286 
   12287 	aprint_normal_dev(sc->sc_dev, "");
   12288 
   12289 	if (sc->sc_type == WM_T_I354) {
   12290 		uint32_t status;
   12291 
   12292 		status = CSR_READ(sc, WMREG_STATUS);
   12293 		if (((status & STATUS_2P5_SKU) != 0)
   12294 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   12298 	} else if (sc->sc_type == WM_T_82545) {
   12299 		/* Only 82545 is LX (XXX except SFP) */
   12300 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12301 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12302 	} else if (sc->sc_sfptype != 0) {
   12303 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12304 		switch (sc->sc_sfptype) {
   12305 		default:
   12306 		case SFF_SFP_ETH_FLAGS_1000SX:
   12307 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12308 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12309 			break;
   12310 		case SFF_SFP_ETH_FLAGS_1000LX:
   12311 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12312 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12313 			break;
   12314 		case SFF_SFP_ETH_FLAGS_1000CX:
   12315 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12316 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12317 			break;
   12318 		case SFF_SFP_ETH_FLAGS_1000T:
   12319 			ADD("1000baseT", IFM_1000_T, 0);
   12320 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12321 			break;
   12322 		case SFF_SFP_ETH_FLAGS_100FX:
   12323 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12324 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12325 			break;
   12326 		}
   12327 	} else {
   12328 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12329 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12330 	}
   12331 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12332 	aprint_normal("\n");
   12333 
   12334 #undef ADD
   12335 
   12336 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12337 }
   12338 
   12339 /*
   12340  * wm_tbi_mediachange:	[ifmedia interface function]
   12341  *
   12342  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12343  */
   12344 static int
   12345 wm_tbi_mediachange(struct ifnet *ifp)
   12346 {
   12347 	struct wm_softc *sc = ifp->if_softc;
   12348 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12349 	uint32_t status, ctrl;
   12350 	bool signal;
   12351 	int i;
   12352 
   12353 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12354 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12355 		/* XXX need some work for >= 82571 and < 82575 */
   12356 		if (sc->sc_type < WM_T_82575)
   12357 			return 0;
   12358 	}
   12359 
   12360 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12361 	    || (sc->sc_type >= WM_T_82575))
   12362 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12363 
   12364 	sc->sc_ctrl &= ~CTRL_LRST;
   12365 	sc->sc_txcw = TXCW_ANE;
   12366 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12367 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12368 	else if (ife->ifm_media & IFM_FDX)
   12369 		sc->sc_txcw |= TXCW_FD;
   12370 	else
   12371 		sc->sc_txcw |= TXCW_HD;
   12372 
   12373 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12374 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12375 
	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12377 		device_xname(sc->sc_dev), sc->sc_txcw));
   12378 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12379 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12380 	CSR_WRITE_FLUSH(sc);
   12381 	delay(1000);
   12382 
   12383 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12384 	signal = wm_tbi_havesignal(sc, ctrl);
   12385 
	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n",
		device_xname(sc->sc_dev), signal));
   12388 
   12389 	if (signal) {
   12390 		/* Have signal; wait for the link to come up. */
   12391 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12392 			delay(10000);
   12393 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12394 				break;
   12395 		}
   12396 
		DPRINTF(sc, WM_DEBUG_LINK,
		    ("%s: i = %d after waiting for link\n",
			device_xname(sc->sc_dev), i));
   12399 
   12400 		status = CSR_READ(sc, WMREG_STATUS);
   12401 		DPRINTF(sc, WM_DEBUG_LINK,
   12402 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12403 			device_xname(sc->sc_dev), status, STATUS_LU));
   12404 		if (status & STATUS_LU) {
   12405 			/* Link is up. */
   12406 			DPRINTF(sc, WM_DEBUG_LINK,
   12407 			    ("%s: LINK: set media -> link up %s\n",
   12408 				device_xname(sc->sc_dev),
   12409 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12410 
   12411 			/*
   12412 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12413 			 * so we should update sc->sc_ctrl
   12414 			 */
   12415 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12416 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12417 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12418 			if (status & STATUS_FD)
   12419 				sc->sc_tctl |=
   12420 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12421 			else
   12422 				sc->sc_tctl |=
   12423 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12424 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12425 				sc->sc_fcrtl |= FCRTL_XONE;
   12426 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12427 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12428 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12429 			sc->sc_tbi_linkup = 1;
   12430 		} else {
   12431 			if (i == WM_LINKUP_TIMEOUT)
   12432 				wm_check_for_link(sc);
   12433 			/* Link is down. */
   12434 			DPRINTF(sc, WM_DEBUG_LINK,
   12435 			    ("%s: LINK: set media -> link down\n",
   12436 				device_xname(sc->sc_dev)));
   12437 			sc->sc_tbi_linkup = 0;
   12438 		}
   12439 	} else {
   12440 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12441 			device_xname(sc->sc_dev)));
   12442 		sc->sc_tbi_linkup = 0;
   12443 	}
   12444 
   12445 	wm_tbi_serdes_set_linkled(sc);
   12446 
   12447 	return 0;
   12448 }
   12449 
   12450 /*
   12451  * wm_tbi_mediastatus:	[ifmedia interface function]
   12452  *
   12453  *	Get the current interface media status on a 1000BASE-X device.
   12454  */
   12455 static void
   12456 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12457 {
   12458 	struct wm_softc *sc = ifp->if_softc;
   12459 	uint32_t ctrl, status;
   12460 
   12461 	ifmr->ifm_status = IFM_AVALID;
   12462 	ifmr->ifm_active = IFM_ETHER;
   12463 
   12464 	status = CSR_READ(sc, WMREG_STATUS);
   12465 	if ((status & STATUS_LU) == 0) {
   12466 		ifmr->ifm_active |= IFM_NONE;
   12467 		return;
   12468 	}
   12469 
   12470 	ifmr->ifm_status |= IFM_ACTIVE;
   12471 	/* Only 82545 is LX */
   12472 	if (sc->sc_type == WM_T_82545)
   12473 		ifmr->ifm_active |= IFM_1000_LX;
   12474 	else
   12475 		ifmr->ifm_active |= IFM_1000_SX;
   12476 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12477 		ifmr->ifm_active |= IFM_FDX;
   12478 	else
   12479 		ifmr->ifm_active |= IFM_HDX;
   12480 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12481 	if (ctrl & CTRL_RFCE)
   12482 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12483 	if (ctrl & CTRL_TFCE)
   12484 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12485 }
   12486 
   12487 /* XXX TBI only */
   12488 static int
   12489 wm_check_for_link(struct wm_softc *sc)
   12490 {
   12491 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12492 	uint32_t rxcw;
   12493 	uint32_t ctrl;
   12494 	uint32_t status;
   12495 	bool signal;
   12496 
   12497 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12498 		device_xname(sc->sc_dev), __func__));
   12499 
   12500 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12501 		/* XXX need some work for >= 82571 */
   12502 		if (sc->sc_type >= WM_T_82571) {
   12503 			sc->sc_tbi_linkup = 1;
   12504 			return 0;
   12505 		}
   12506 	}
   12507 
   12508 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12509 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12510 	status = CSR_READ(sc, WMREG_STATUS);
   12511 	signal = wm_tbi_havesignal(sc, ctrl);
   12512 
   12513 	DPRINTF(sc, WM_DEBUG_LINK,
   12514 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12515 		device_xname(sc->sc_dev), __func__, signal,
   12516 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12517 
   12518 	/*
   12519 	 * SWDPIN   LU RXCW
   12520 	 *	0    0	  0
   12521 	 *	0    0	  1	(should not happen)
   12522 	 *	0    1	  0	(should not happen)
   12523 	 *	0    1	  1	(should not happen)
   12524 	 *	1    0	  0	Disable autonego and force linkup
   12525 	 *	1    0	  1	got /C/ but not linkup yet
   12526 	 *	1    1	  0	(linkup)
   12527 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12528 	 *
   12529 	 */
   12530 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12531 		DPRINTF(sc, WM_DEBUG_LINK,
   12532 		    ("%s: %s: force linkup and fullduplex\n",
   12533 			device_xname(sc->sc_dev), __func__));
   12534 		sc->sc_tbi_linkup = 0;
   12535 		/* Disable auto-negotiation in the TXCW register */
   12536 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12537 
   12538 		/*
   12539 		 * Force link-up and also force full-duplex.
   12540 		 *
		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
   12542 		 * so we should update sc->sc_ctrl
   12543 		 */
   12544 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12545 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12546 	} else if (((status & STATUS_LU) != 0)
   12547 	    && ((rxcw & RXCW_C) != 0)
   12548 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12549 		sc->sc_tbi_linkup = 1;
   12550 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
			device_xname(sc->sc_dev), __func__));
   12553 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12554 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12555 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12557 			device_xname(sc->sc_dev), __func__));
   12558 	} else {
   12559 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12560 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12561 			status));
   12562 	}
   12563 
   12564 	return 0;
   12565 }
   12566 
   12567 /*
   12568  * wm_tbi_tick:
   12569  *
   12570  *	Check the link on TBI devices.
   12571  *	This function acts as mii_tick().
   12572  */
   12573 static void
   12574 wm_tbi_tick(struct wm_softc *sc)
   12575 {
   12576 	struct mii_data *mii = &sc->sc_mii;
   12577 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12578 	uint32_t status;
   12579 
   12580 	KASSERT(WM_CORE_LOCKED(sc));
   12581 
   12582 	status = CSR_READ(sc, WMREG_STATUS);
   12583 
   12584 	/* XXX is this needed? */
   12585 	(void)CSR_READ(sc, WMREG_RXCW);
   12586 	(void)CSR_READ(sc, WMREG_CTRL);
   12587 
   12588 	/* set link status */
   12589 	if ((status & STATUS_LU) == 0) {
   12590 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12591 			device_xname(sc->sc_dev)));
   12592 		sc->sc_tbi_linkup = 0;
   12593 	} else if (sc->sc_tbi_linkup == 0) {
   12594 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12595 			device_xname(sc->sc_dev),
   12596 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12597 		sc->sc_tbi_linkup = 1;
   12598 		sc->sc_tbi_serdes_ticks = 0;
   12599 	}
   12600 
   12601 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12602 		goto setled;
   12603 
   12604 	if ((status & STATUS_LU) == 0) {
   12605 		sc->sc_tbi_linkup = 0;
   12606 		/* If the timer expired, retry autonegotiation */
   12607 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12608 		    && (++sc->sc_tbi_serdes_ticks
   12609 			>= sc->sc_tbi_serdes_anegticks)) {
   12610 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12611 				device_xname(sc->sc_dev), __func__));
   12612 			sc->sc_tbi_serdes_ticks = 0;
   12613 			/*
   12614 			 * Reset the link, and let autonegotiation do
   12615 			 * its thing
   12616 			 */
   12617 			sc->sc_ctrl |= CTRL_LRST;
   12618 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12619 			CSR_WRITE_FLUSH(sc);
   12620 			delay(1000);
   12621 			sc->sc_ctrl &= ~CTRL_LRST;
   12622 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12623 			CSR_WRITE_FLUSH(sc);
   12624 			delay(1000);
   12625 			CSR_WRITE(sc, WMREG_TXCW,
   12626 			    sc->sc_txcw & ~TXCW_ANE);
   12627 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12628 		}
   12629 	}
   12630 
   12631 setled:
   12632 	wm_tbi_serdes_set_linkled(sc);
   12633 }
   12634 
   12635 /* SERDES related */
   12636 static void
   12637 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12638 {
   12639 	uint32_t reg;
   12640 
   12641 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12642 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12643 		return;
   12644 
   12645 	/* Enable PCS to turn on link */
   12646 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12647 	reg |= PCS_CFG_PCS_EN;
   12648 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12649 
   12650 	/* Power up the laser */
   12651 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12652 	reg &= ~CTRL_EXT_SWDPIN(3);
   12653 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12654 
   12655 	/* Flush the write to verify completion */
   12656 	CSR_WRITE_FLUSH(sc);
   12657 	delay(1000);
   12658 }
   12659 
   12660 static int
   12661 wm_serdes_mediachange(struct ifnet *ifp)
   12662 {
   12663 	struct wm_softc *sc = ifp->if_softc;
   12664 	bool pcs_autoneg = true; /* XXX */
   12665 	uint32_t ctrl_ext, pcs_lctl, reg;
   12666 
   12667 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12668 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12669 		return 0;
   12670 
   12671 	/* XXX Currently, this function is not called on 8257[12] */
   12672 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12673 	    || (sc->sc_type >= WM_T_82575))
   12674 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12675 
	/* Power on the SFP cage if present */
   12677 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12678 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12679 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12680 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12681 
   12682 	sc->sc_ctrl |= CTRL_SLU;
   12683 
   12684 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12685 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12686 
   12687 		reg = CSR_READ(sc, WMREG_CONNSW);
   12688 		reg |= CONNSW_ENRGSRC;
   12689 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12690 	}
   12691 
   12692 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12693 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12694 	case CTRL_EXT_LINK_MODE_SGMII:
   12695 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12696 		pcs_autoneg = true;
		/* Autoneg timeout should be disabled for SGMII mode */
   12698 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12699 		break;
   12700 	case CTRL_EXT_LINK_MODE_1000KX:
   12701 		pcs_autoneg = false;
   12702 		/* FALLTHROUGH */
   12703 	default:
   12704 		if ((sc->sc_type == WM_T_82575)
   12705 		    || (sc->sc_type == WM_T_82576)) {
   12706 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12707 				pcs_autoneg = false;
   12708 		}
   12709 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12710 		    | CTRL_FRCFDX;
   12711 
   12712 		/* Set speed of 1000/Full if speed/duplex is forced */
   12713 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12714 	}
   12715 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12716 
   12717 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12718 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12719 
   12720 	if (pcs_autoneg) {
   12721 		/* Set PCS register for autoneg */
   12722 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12723 
   12724 		/* Disable force flow control for autoneg */
   12725 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12726 
   12727 		/* Configure flow control advertisement for autoneg */
   12728 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12729 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12730 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12731 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12732 	} else
   12733 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12734 
   12735 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12736 
   12737 	return 0;
   12738 }
   12739 
   12740 static void
   12741 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12742 {
   12743 	struct wm_softc *sc = ifp->if_softc;
   12744 	struct mii_data *mii = &sc->sc_mii;
   12745 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12746 	uint32_t pcs_adv, pcs_lpab, reg;
   12747 
   12748 	ifmr->ifm_status = IFM_AVALID;
   12749 	ifmr->ifm_active = IFM_ETHER;
   12750 
   12751 	/* Check PCS */
   12752 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12753 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12754 		ifmr->ifm_active |= IFM_NONE;
   12755 		sc->sc_tbi_linkup = 0;
   12756 		goto setled;
   12757 	}
   12758 
   12759 	sc->sc_tbi_linkup = 1;
   12760 	ifmr->ifm_status |= IFM_ACTIVE;
   12761 	if (sc->sc_type == WM_T_I354) {
   12762 		uint32_t status;
   12763 
   12764 		status = CSR_READ(sc, WMREG_STATUS);
   12765 		if (((status & STATUS_2P5_SKU) != 0)
   12766 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12767 			ifmr->ifm_active |= IFM_2500_KX;
   12768 		} else
   12769 			ifmr->ifm_active |= IFM_1000_KX;
   12770 	} else {
   12771 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12772 		case PCS_LSTS_SPEED_10:
   12773 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12774 			break;
   12775 		case PCS_LSTS_SPEED_100:
   12776 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12777 			break;
   12778 		case PCS_LSTS_SPEED_1000:
   12779 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12780 			break;
   12781 		default:
   12782 			device_printf(sc->sc_dev, "Unknown speed\n");
   12783 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12784 			break;
   12785 		}
   12786 	}
   12787 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12788 	if ((reg & PCS_LSTS_FDX) != 0)
   12789 		ifmr->ifm_active |= IFM_FDX;
   12790 	else
   12791 		ifmr->ifm_active |= IFM_HDX;
   12792 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12793 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12794 		/* Check flow */
   12795 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12796 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12797 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12798 			goto setled;
   12799 		}
   12800 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12801 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12802 		DPRINTF(sc, WM_DEBUG_LINK,
   12803 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12804 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12805 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12806 			mii->mii_media_active |= IFM_FLOW
   12807 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12808 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12809 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12810 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12811 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12812 			mii->mii_media_active |= IFM_FLOW
   12813 			    | IFM_ETH_TXPAUSE;
   12814 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12815 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12816 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12817 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12818 			mii->mii_media_active |= IFM_FLOW
   12819 			    | IFM_ETH_RXPAUSE;
   12820 		}
   12821 	}
   12822 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12823 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12824 setled:
   12825 	wm_tbi_serdes_set_linkled(sc);
   12826 }
   12827 
   12828 /*
   12829  * wm_serdes_tick:
   12830  *
   12831  *	Check the link on serdes devices.
   12832  */
   12833 static void
   12834 wm_serdes_tick(struct wm_softc *sc)
   12835 {
   12836 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12837 	struct mii_data *mii = &sc->sc_mii;
   12838 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12839 	uint32_t reg;
   12840 
   12841 	KASSERT(WM_CORE_LOCKED(sc));
   12842 
   12843 	mii->mii_media_status = IFM_AVALID;
   12844 	mii->mii_media_active = IFM_ETHER;
   12845 
   12846 	/* Check PCS */
   12847 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12848 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12849 		mii->mii_media_status |= IFM_ACTIVE;
   12850 		sc->sc_tbi_linkup = 1;
   12851 		sc->sc_tbi_serdes_ticks = 0;
   12852 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12853 		if ((reg & PCS_LSTS_FDX) != 0)
   12854 			mii->mii_media_active |= IFM_FDX;
   12855 		else
   12856 			mii->mii_media_active |= IFM_HDX;
   12857 	} else {
   12858 		mii->mii_media_status |= IFM_NONE;
   12859 		sc->sc_tbi_linkup = 0;
   12860 		/* If the timer expired, retry autonegotiation */
   12861 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12862 		    && (++sc->sc_tbi_serdes_ticks
   12863 			>= sc->sc_tbi_serdes_anegticks)) {
   12864 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12865 				device_xname(sc->sc_dev), __func__));
   12866 			sc->sc_tbi_serdes_ticks = 0;
   12867 			/* XXX */
   12868 			wm_serdes_mediachange(ifp);
   12869 		}
   12870 	}
   12871 
   12872 	wm_tbi_serdes_set_linkled(sc);
   12873 }
   12874 
   12875 /* SFP related */
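/*
 * The SFP module's ID EEPROM is read through the I2C interface embedded
 * in the MAC (the I2CCMD register).  The identifier byte says what kind
 * of module is fitted, and the Ethernet compliance code byte selects the
 * media type: 1000BASE-SX/LX map to SERDES, 1000BASE-T to SGMII copper,
 * and 100BASE-FX to SERDES via SGMII.
 */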
   12876 
   12877 static int
   12878 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12879 {
   12880 	uint32_t i2ccmd;
   12881 	int i;
   12882 
   12883 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12884 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12885 
   12886 	/* Poll the ready bit */
   12887 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12888 		delay(50);
   12889 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12890 		if (i2ccmd & I2CCMD_READY)
   12891 			break;
   12892 	}
   12893 	if ((i2ccmd & I2CCMD_READY) == 0)
   12894 		return -1;
   12895 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12896 		return -1;
   12897 
   12898 	*data = i2ccmd & 0x00ff;
   12899 
   12900 	return 0;
   12901 }
   12902 
   12903 static uint32_t
   12904 wm_sfp_get_media_type(struct wm_softc *sc)
   12905 {
   12906 	uint32_t ctrl_ext;
   12907 	uint8_t val = 0;
   12908 	int timeout = 3;
   12909 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12910 	int rv = -1;
   12911 
   12912 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12913 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12914 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12915 	CSR_WRITE_FLUSH(sc);
   12916 
   12917 	/* Read SFP module data */
   12918 	while (timeout) {
   12919 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12920 		if (rv == 0)
   12921 			break;
   12922 		delay(100*1000); /* XXX too big */
   12923 		timeout--;
   12924 	}
   12925 	if (rv != 0)
   12926 		goto out;
   12927 
   12928 	switch (val) {
   12929 	case SFF_SFP_ID_SFF:
   12930 		aprint_normal_dev(sc->sc_dev,
   12931 		    "Module/Connector soldered to board\n");
   12932 		break;
   12933 	case SFF_SFP_ID_SFP:
   12934 		sc->sc_flags |= WM_F_SFP;
   12935 		break;
   12936 	case SFF_SFP_ID_UNKNOWN:
   12937 		goto out;
   12938 	default:
   12939 		break;
   12940 	}
   12941 
   12942 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12943 	if (rv != 0)
   12944 		goto out;
   12945 
   12946 	sc->sc_sfptype = val;
   12947 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12948 		mediatype = WM_MEDIATYPE_SERDES;
   12949 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12950 		sc->sc_flags |= WM_F_SGMII;
   12951 		mediatype = WM_MEDIATYPE_COPPER;
   12952 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12953 		sc->sc_flags |= WM_F_SGMII;
   12954 		mediatype = WM_MEDIATYPE_SERDES;
   12955 	} else {
   12956 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12957 		    __func__, sc->sc_sfptype);
   12958 		sc->sc_sfptype = 0; /* XXX unknown */
   12959 	}
   12960 
   12961 out:
   12962 	/* Restore I2C interface setting */
   12963 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12964 
   12965 	return mediatype;
   12966 }
   12967 
   12968 /*
   12969  * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
   12971  */
   12972 
/* Common to both SPI and MicroWire */
   12974 
   12975 /*
   12976  * wm_eeprom_sendbits:
   12977  *
   12978  *	Send a series of bits to the EEPROM.
   12979  */
   12980 static void
   12981 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12982 {
   12983 	uint32_t reg;
   12984 	int x;
   12985 
   12986 	reg = CSR_READ(sc, WMREG_EECD);
   12987 
   12988 	for (x = nbits; x > 0; x--) {
   12989 		if (bits & (1U << (x - 1)))
   12990 			reg |= EECD_DI;
   12991 		else
   12992 			reg &= ~EECD_DI;
   12993 		CSR_WRITE(sc, WMREG_EECD, reg);
   12994 		CSR_WRITE_FLUSH(sc);
   12995 		delay(2);
   12996 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12997 		CSR_WRITE_FLUSH(sc);
   12998 		delay(2);
   12999 		CSR_WRITE(sc, WMREG_EECD, reg);
   13000 		CSR_WRITE_FLUSH(sc);
   13001 		delay(2);
   13002 	}
   13003 }
   13004 
   13005 /*
   13006  * wm_eeprom_recvbits:
   13007  *
   13008  *	Receive a series of bits from the EEPROM.
   13009  */
   13010 static void
   13011 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13012 {
   13013 	uint32_t reg, val;
   13014 	int x;
   13015 
   13016 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13017 
   13018 	val = 0;
   13019 	for (x = nbits; x > 0; x--) {
   13020 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13021 		CSR_WRITE_FLUSH(sc);
   13022 		delay(2);
   13023 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13024 			val |= (1U << (x - 1));
   13025 		CSR_WRITE(sc, WMREG_EECD, reg);
   13026 		CSR_WRITE_FLUSH(sc);
   13027 		delay(2);
   13028 	}
   13029 	*valp = val;
   13030 }
   13031 
   13032 /* Microwire */
   13033 
   13034 /*
   13035  * wm_nvm_read_uwire:
   13036  *
   13037  *	Read a word from the EEPROM using the MicroWire protocol.
   13038  */
   13039 static int
   13040 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13041 {
   13042 	uint32_t reg, val;
   13043 	int i;
   13044 
   13045 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13046 		device_xname(sc->sc_dev), __func__));
   13047 
   13048 	if (sc->nvm.acquire(sc) != 0)
   13049 		return -1;
   13050 
   13051 	for (i = 0; i < wordcnt; i++) {
   13052 		/* Clear SK and DI. */
   13053 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13054 		CSR_WRITE(sc, WMREG_EECD, reg);
   13055 
   13056 		/*
		 * XXX: Workaround for a bug in qemu-0.12.x and prior,
		 * and in Xen.
		 *
		 * We apply this workaround only to the 82540 because
		 * qemu's e1000 emulation acts as an 82540.
   13062 		 */
   13063 		if (sc->sc_type == WM_T_82540) {
   13064 			reg |= EECD_SK;
   13065 			CSR_WRITE(sc, WMREG_EECD, reg);
   13066 			reg &= ~EECD_SK;
   13067 			CSR_WRITE(sc, WMREG_EECD, reg);
   13068 			CSR_WRITE_FLUSH(sc);
   13069 			delay(2);
   13070 		}
   13071 		/* XXX: end of workaround */
   13072 
   13073 		/* Set CHIP SELECT. */
   13074 		reg |= EECD_CS;
   13075 		CSR_WRITE(sc, WMREG_EECD, reg);
   13076 		CSR_WRITE_FLUSH(sc);
   13077 		delay(2);
   13078 
   13079 		/* Shift in the READ command. */
   13080 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13081 
   13082 		/* Shift in address. */
   13083 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13084 
   13085 		/* Shift out the data. */
   13086 		wm_eeprom_recvbits(sc, &val, 16);
   13087 		data[i] = val & 0xffff;
   13088 
   13089 		/* Clear CHIP SELECT. */
   13090 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13091 		CSR_WRITE(sc, WMREG_EECD, reg);
   13092 		CSR_WRITE_FLUSH(sc);
   13093 		delay(2);
   13094 	}
   13095 
   13096 	sc->nvm.release(sc);
   13097 	return 0;
   13098 }
   13099 
   13100 /* SPI */
   13101 
   13102 /*
   13103  * Set SPI and FLASH related information from the EECD register.
   13104  * For 82541 and 82547, the word size is taken from EEPROM.
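 * The EE_SIZE field is an exponent: NVM_WORD_SIZE_BASE_SHIFT is added
 * to it and the word count is computed as 1 << size, so e.g. a final
 * size of 6 means 64 words.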
   13105  */
   13106 static int
   13107 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13108 {
   13109 	int size;
   13110 	uint32_t reg;
   13111 	uint16_t data;
   13112 
   13113 	reg = CSR_READ(sc, WMREG_EECD);
   13114 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13115 
   13116 	/* Read the size of NVM from EECD by default */
   13117 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13118 	switch (sc->sc_type) {
   13119 	case WM_T_82541:
   13120 	case WM_T_82541_2:
   13121 	case WM_T_82547:
   13122 	case WM_T_82547_2:
		/* Set a dummy word size so the EEPROM size word can be read */
   13124 		sc->sc_nvm_wordsize = 64;
   13125 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13126 			aprint_error_dev(sc->sc_dev,
   13127 			    "%s: failed to read EEPROM size\n", __func__);
   13128 		}
   13129 		reg = data;
   13130 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13131 		if (size == 0)
			size = 6; /* 64 words */
   13133 		else
   13134 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13135 		break;
   13136 	case WM_T_80003:
   13137 	case WM_T_82571:
   13138 	case WM_T_82572:
   13139 	case WM_T_82573: /* SPI case */
   13140 	case WM_T_82574: /* SPI case */
   13141 	case WM_T_82583: /* SPI case */
   13142 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13143 		if (size > 14)
   13144 			size = 14;
   13145 		break;
   13146 	case WM_T_82575:
   13147 	case WM_T_82576:
   13148 	case WM_T_82580:
   13149 	case WM_T_I350:
   13150 	case WM_T_I354:
   13151 	case WM_T_I210:
   13152 	case WM_T_I211:
   13153 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13154 		if (size > 15)
   13155 			size = 15;
   13156 		break;
   13157 	default:
   13158 		aprint_error_dev(sc->sc_dev,
   13159 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   13160 		return -1;
   13162 	}
   13163 
   13164 	sc->sc_nvm_wordsize = 1 << size;
   13165 
   13166 	return 0;
   13167 }
   13168 
   13169 /*
   13170  * wm_nvm_ready_spi:
   13171  *
   13172  *	Wait for a SPI EEPROM to be ready for commands.
   13173  */
   13174 static int
   13175 wm_nvm_ready_spi(struct wm_softc *sc)
   13176 {
   13177 	uint32_t val;
   13178 	int usec;
   13179 
   13180 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13181 		device_xname(sc->sc_dev), __func__));
   13182 
   13183 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13184 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13185 		wm_eeprom_recvbits(sc, &val, 8);
   13186 		if ((val & SPI_SR_RDY) == 0)
   13187 			break;
   13188 	}
   13189 	if (usec >= SPI_MAX_RETRIES) {
   13190 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   13191 		return -1;
   13192 	}
   13193 	return 0;
   13194 }
   13195 
   13196 /*
   13197  * wm_nvm_read_spi:
   13198  *
   13199  *	Read a work from the EEPROM using the SPI protocol.
   13200  */
   13201 static int
   13202 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13203 {
   13204 	uint32_t reg, val;
   13205 	int i;
   13206 	uint8_t opc;
   13207 	int rv = 0;
   13208 
   13209 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13210 		device_xname(sc->sc_dev), __func__));
   13211 
   13212 	if (sc->nvm.acquire(sc) != 0)
   13213 		return -1;
   13214 
   13215 	/* Clear SK and CS. */
   13216 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13217 	CSR_WRITE(sc, WMREG_EECD, reg);
   13218 	CSR_WRITE_FLUSH(sc);
   13219 	delay(2);
   13220 
   13221 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13222 		goto out;
   13223 
   13224 	/* Toggle CS to flush commands. */
   13225 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13226 	CSR_WRITE_FLUSH(sc);
   13227 	delay(2);
   13228 	CSR_WRITE(sc, WMREG_EECD, reg);
   13229 	CSR_WRITE_FLUSH(sc);
   13230 	delay(2);
   13231 
   13232 	opc = SPI_OPC_READ;
   13233 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13234 		opc |= SPI_OPC_A8;
   13235 
   13236 	wm_eeprom_sendbits(sc, opc, 8);
   13237 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13238 
   13239 	for (i = 0; i < wordcnt; i++) {
   13240 		wm_eeprom_recvbits(sc, &val, 16);
   13241 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13242 	}
   13243 
   13244 	/* Raise CS and clear SK. */
   13245 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13246 	CSR_WRITE(sc, WMREG_EECD, reg);
   13247 	CSR_WRITE_FLUSH(sc);
   13248 	delay(2);
   13249 
   13250 out:
   13251 	sc->nvm.release(sc);
   13252 	return rv;
   13253 }
   13254 
/* Reading via the EERD register */
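/*
 * With EERD the hardware runs the serial protocol itself: software
 * writes the word address together with the START bit, polls until the
 * DONE bit is set, and then reads the data from the upper bits of the
 * same register.
 */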
   13256 
   13257 static int
   13258 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13259 {
   13260 	uint32_t attempts = 100000;
   13261 	uint32_t i, reg = 0;
   13262 	int32_t done = -1;
   13263 
   13264 	for (i = 0; i < attempts; i++) {
   13265 		reg = CSR_READ(sc, rw);
   13266 
   13267 		if (reg & EERD_DONE) {
   13268 			done = 0;
   13269 			break;
   13270 		}
   13271 		delay(5);
   13272 	}
   13273 
   13274 	return done;
   13275 }
   13276 
   13277 static int
   13278 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13279 {
   13280 	int i, eerd = 0;
   13281 	int rv = 0;
   13282 
   13283 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13284 		device_xname(sc->sc_dev), __func__));
   13285 
   13286 	if (sc->nvm.acquire(sc) != 0)
   13287 		return -1;
   13288 
   13289 	for (i = 0; i < wordcnt; i++) {
   13290 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13291 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13292 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13293 		if (rv != 0) {
   13294 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13295 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13296 			break;
   13297 		}
   13298 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13299 	}
   13300 
   13301 	sc->nvm.release(sc);
   13302 	return rv;
   13303 }
   13304 
   13305 /* Flash */
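/*
 * ICH8-style NVM lives in the chipset flash and is laid out as two
 * banks, only one of which is valid at a time.  The valid bank is
 * found by checking a signature byte at a fixed word offset in each
 * bank (or, on ICH8/ICH9, the SEC1VAL bit in EECD when that bit is
 * reported as valid).
 */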
   13306 
   13307 static int
   13308 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13309 {
   13310 	uint32_t eecd;
   13311 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13312 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13313 	uint32_t nvm_dword = 0;
   13314 	uint8_t sig_byte = 0;
   13315 	int rv;
   13316 
   13317 	switch (sc->sc_type) {
   13318 	case WM_T_PCH_SPT:
   13319 	case WM_T_PCH_CNP:
   13320 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13321 		act_offset = ICH_NVM_SIG_WORD * 2;
   13322 
   13323 		/* Set bank to 0 in case flash read fails. */
   13324 		*bank = 0;
   13325 
   13326 		/* Check bank 0 */
   13327 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13328 		if (rv != 0)
   13329 			return rv;
   13330 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13331 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13332 			*bank = 0;
   13333 			return 0;
   13334 		}
   13335 
   13336 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13340 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13341 			*bank = 1;
   13342 			return 0;
   13343 		}
   13344 		aprint_error_dev(sc->sc_dev,
   13345 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13346 		return -1;
   13347 	case WM_T_ICH8:
   13348 	case WM_T_ICH9:
   13349 		eecd = CSR_READ(sc, WMREG_EECD);
   13350 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13351 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13352 			return 0;
   13353 		}
   13354 		/* FALLTHROUGH */
   13355 	default:
   13356 		/* Default to 0 */
   13357 		*bank = 0;
   13358 
   13359 		/* Check bank 0 */
   13360 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13361 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13362 			*bank = 0;
   13363 			return 0;
   13364 		}
   13365 
   13366 		/* Check bank 1 */
   13367 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13368 		    &sig_byte);
   13369 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13370 			*bank = 1;
   13371 			return 0;
   13372 		}
   13373 	}
   13374 
   13375 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13376 		device_xname(sc->sc_dev)));
   13377 	return -1;
   13378 }
   13379 
   13380 /******************************************************************************
   13381  * This function does initial flash setup so that a new read/write/erase cycle
   13382  * can be started.
   13383  *
   13384  * sc - The pointer to the hw structure
   13385  ****************************************************************************/
   13386 static int32_t
   13387 wm_ich8_cycle_init(struct wm_softc *sc)
   13388 {
   13389 	uint16_t hsfsts;
   13390 	int32_t error = 1;
   13391 	int32_t i     = 0;
   13392 
   13393 	if (sc->sc_type >= WM_T_PCH_SPT)
   13394 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13395 	else
   13396 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13397 
	/* Check the Flash Descriptor Valid bit in the HW status register */
   13399 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13400 		return error;
   13401 
	/* Clear FCERR and DAEL in the HW status register by writing 1s */
   13404 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13405 
   13406 	if (sc->sc_type >= WM_T_PCH_SPT)
   13407 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13408 	else
   13409 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13410 
   13411 	/*
   13412 	 * Either we should have a hardware SPI cycle in progress bit to check
   13413 	 * against, in order to start a new cycle or FDONE bit should be
   13414 	 * changed in the hardware so that it is 1 after hardware reset, which
   13415 	 * can then be used as an indication whether a cycle is in progress or
   13416 	 * has been completed .. we should also have some software semaphore
   13417 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   13418 	 * threads access to those bits can be sequentiallized or a way so that
   13419 	 * 2 threads don't start the cycle at the same time
   13420 	 */
   13421 
   13422 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13423 		/*
   13424 		 * There is no cycle running at present, so we can start a
   13425 		 * cycle
   13426 		 */
   13427 
   13428 		/* Begin by setting Flash Cycle Done. */
   13429 		hsfsts |= HSFSTS_DONE;
   13430 		if (sc->sc_type >= WM_T_PCH_SPT)
   13431 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13432 			    hsfsts & 0xffffUL);
   13433 		else
   13434 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13435 		error = 0;
   13436 	} else {
   13437 		/*
		 * Otherwise poll for some time so the current cycle has a
   13439 		 * chance to end before giving up.
   13440 		 */
   13441 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13442 			if (sc->sc_type >= WM_T_PCH_SPT)
   13443 				hsfsts = ICH8_FLASH_READ32(sc,
   13444 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13445 			else
   13446 				hsfsts = ICH8_FLASH_READ16(sc,
   13447 				    ICH_FLASH_HSFSTS);
   13448 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13449 				error = 0;
   13450 				break;
   13451 			}
   13452 			delay(1);
   13453 		}
   13454 		if (error == 0) {
   13455 			/*
			 * The previous cycle ended before the timeout, so
			 * now set the Flash Cycle Done bit.
   13458 			 */
   13459 			hsfsts |= HSFSTS_DONE;
   13460 			if (sc->sc_type >= WM_T_PCH_SPT)
   13461 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13462 				    hsfsts & 0xffffUL);
   13463 			else
   13464 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13465 				    hsfsts);
   13466 		}
   13467 	}
   13468 	return error;
   13469 }
   13470 
   13471 /******************************************************************************
   13472  * This function starts a flash cycle and waits for its completion
   13473  *
   13474  * sc - The pointer to the hw structure
   13475  ****************************************************************************/
   13476 static int32_t
   13477 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13478 {
   13479 	uint16_t hsflctl;
   13480 	uint16_t hsfsts;
   13481 	int32_t error = 1;
   13482 	uint32_t i = 0;
   13483 
   13484 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13485 	if (sc->sc_type >= WM_T_PCH_SPT)
   13486 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13487 	else
   13488 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13489 	hsflctl |= HSFCTL_GO;
   13490 	if (sc->sc_type >= WM_T_PCH_SPT)
   13491 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13492 		    (uint32_t)hsflctl << 16);
   13493 	else
   13494 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13495 
	/* Wait until the FDONE bit is set */
   13497 	do {
   13498 		if (sc->sc_type >= WM_T_PCH_SPT)
   13499 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13500 			    & 0xffffUL;
   13501 		else
   13502 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13503 		if (hsfsts & HSFSTS_DONE)
   13504 			break;
   13505 		delay(1);
   13506 		i++;
   13507 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13509 		error = 0;
   13510 
   13511 	return error;
   13512 }
   13513 
   13514 /******************************************************************************
   13515  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13516  *
   13517  * sc - The pointer to the hw structure
   13518  * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte, 2=word, 4=dword
   13520  * data - Pointer to the word to store the value read.
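 *
 * On PCH_SPT and newer, HSFSTS and HSFCTL share one 32-bit register in
 * LAN memory space (HSFSTS in the low 16 bits, HSFCTL in the high 16
 * bits), so both are accessed there with 32-bit reads and writes.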
   13521  *****************************************************************************/
   13522 static int32_t
   13523 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13524     uint32_t size, uint32_t *data)
   13525 {
   13526 	uint16_t hsfsts;
   13527 	uint16_t hsflctl;
   13528 	uint32_t flash_linear_address;
   13529 	uint32_t flash_data = 0;
   13530 	int32_t error = 1;
   13531 	int32_t count = 0;
   13532 
	if (size < 1 || size > 4 || data == NULL ||
   13534 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13535 		return error;
   13536 
   13537 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13538 	    sc->sc_ich8_flash_base;
   13539 
   13540 	do {
   13541 		delay(1);
   13542 		/* Steps */
   13543 		error = wm_ich8_cycle_init(sc);
   13544 		if (error)
   13545 			break;
   13546 
   13547 		if (sc->sc_type >= WM_T_PCH_SPT)
   13548 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13549 			    >> 16;
   13550 		else
   13551 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds size - 1 (0 = 1 byte, 3 = 4 bytes) */
   13553 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13554 		    & HSFCTL_BCOUNT_MASK;
   13555 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13556 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13557 			/*
			 * In SPT, this register is in the LAN memory space,
			 * not flash.  Therefore, only 32-bit access is
			 * supported.
   13560 			 */
   13561 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13562 			    (uint32_t)hsflctl << 16);
   13563 		} else
   13564 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13565 
   13566 		/*
		 * Write the last 24 bits of the index into the Flash Linear
		 * Address field of the Flash Address register.
   13569 		 */
		/* TODO: maybe check the index against the size of the flash */
   13571 
   13572 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13573 
   13574 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13575 
   13576 		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the value out of Flash
		 * Data0, least significant byte first.
   13581 		 */
   13582 		if (error == 0) {
   13583 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13584 			if (size == 1)
   13585 				*data = (uint8_t)(flash_data & 0x000000FF);
   13586 			else if (size == 2)
   13587 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13588 			else if (size == 4)
   13589 				*data = (uint32_t)flash_data;
   13590 			break;
   13591 		} else {
   13592 			/*
   13593 			 * If we've gotten here, then things are probably
   13594 			 * completely hosed, but if the error condition is
   13595 			 * detected, it won't hurt to give it another try...
   13596 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13597 			 */
   13598 			if (sc->sc_type >= WM_T_PCH_SPT)
   13599 				hsfsts = ICH8_FLASH_READ32(sc,
   13600 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13601 			else
   13602 				hsfsts = ICH8_FLASH_READ16(sc,
   13603 				    ICH_FLASH_HSFSTS);
   13604 
   13605 			if (hsfsts & HSFSTS_ERR) {
   13606 				/* Repeat for some time before giving up. */
   13607 				continue;
   13608 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13609 				break;
   13610 		}
   13611 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13612 
   13613 	return error;
   13614 }
   13615 
   13616 /******************************************************************************
   13617  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13618  *
 * sc - pointer to the wm_softc structure
   13620  * index - The index of the byte to read.
   13621  * data - Pointer to a byte to store the value read.
   13622  *****************************************************************************/
   13623 static int32_t
   13624 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13625 {
   13626 	int32_t status;
   13627 	uint32_t word = 0;
   13628 
   13629 	status = wm_read_ich8_data(sc, index, 1, &word);
   13630 	if (status == 0)
   13631 		*data = (uint8_t)word;
   13632 	else
   13633 		*data = 0;
   13634 
   13635 	return status;
   13636 }
   13637 
   13638 /******************************************************************************
   13639  * Reads a word from the NVM using the ICH8 flash access registers.
   13640  *
 * sc - pointer to the wm_softc structure
   13642  * index - The starting byte index of the word to read.
   13643  * data - Pointer to a word to store the value read.
   13644  *****************************************************************************/
   13645 static int32_t
   13646 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13647 {
   13648 	int32_t status;
   13649 	uint32_t word = 0;
   13650 
   13651 	status = wm_read_ich8_data(sc, index, 2, &word);
   13652 	if (status == 0)
   13653 		*data = (uint16_t)word;
   13654 	else
   13655 		*data = 0;
   13656 
   13657 	return status;
   13658 }
   13659 
   13660 /******************************************************************************
   13661  * Reads a dword from the NVM using the ICH8 flash access registers.
   13662  *
 * sc - pointer to the wm_softc structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   13666  *****************************************************************************/
   13667 static int32_t
   13668 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13669 {
   13670 	int32_t status;
   13671 
   13672 	status = wm_read_ich8_data(sc, index, 4, data);
   13673 	return status;
   13674 }
   13675 
   13676 /******************************************************************************
   13677  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13678  * register.
   13679  *
   13680  * sc - Struct containing variables accessed by shared code
   13681  * offset - offset of word in the EEPROM to read
   13682  * data - word read from the EEPROM
   13683  * words - number of words to read
   13684  *****************************************************************************/
   13685 static int
   13686 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13687 {
   13688 	int32_t	 rv = 0;
   13689 	uint32_t flash_bank = 0;
   13690 	uint32_t act_offset = 0;
   13691 	uint32_t bank_offset = 0;
   13692 	uint16_t word = 0;
   13693 	uint16_t i = 0;
   13694 
   13695 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13696 		device_xname(sc->sc_dev), __func__));
   13697 
   13698 	if (sc->nvm.acquire(sc) != 0)
   13699 		return -1;
   13700 
   13701 	/*
   13702 	 * We need to know which is the valid flash bank.  In the event
   13703 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13704 	 * managing flash_bank. So it cannot be trusted and needs
   13705 	 * to be updated with each read.
   13706 	 */
   13707 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13708 	if (rv) {
   13709 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13710 			device_xname(sc->sc_dev)));
   13711 		flash_bank = 0;
   13712 	}
   13713 
   13714 	/*
   13715 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13716 	 * size
   13717 	 */
   13718 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13719 
   13720 	for (i = 0; i < words; i++) {
   13721 		/* The NVM part needs a byte offset, hence * 2 */
   13722 		act_offset = bank_offset + ((offset + i) * 2);
   13723 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13724 		if (rv) {
   13725 			aprint_error_dev(sc->sc_dev,
   13726 			    "%s: failed to read NVM\n", __func__);
   13727 			break;
   13728 		}
   13729 		data[i] = word;
   13730 	}
   13731 
   13732 	sc->nvm.release(sc);
   13733 	return rv;
   13734 }
   13735 
   13736 /******************************************************************************
   13737  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13738  * register.
   13739  *
   13740  * sc - Struct containing variables accessed by shared code
   13741  * offset - offset of word in the EEPROM to read
   13742  * data - word read from the EEPROM
   13743  * words - number of words to read
   13744  *****************************************************************************/
   13745 static int
   13746 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13747 {
   13748 	int32_t	 rv = 0;
   13749 	uint32_t flash_bank = 0;
   13750 	uint32_t act_offset = 0;
   13751 	uint32_t bank_offset = 0;
   13752 	uint32_t dword = 0;
   13753 	uint16_t i = 0;
   13754 
   13755 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13756 		device_xname(sc->sc_dev), __func__));
   13757 
   13758 	if (sc->nvm.acquire(sc) != 0)
   13759 		return -1;
   13760 
   13761 	/*
   13762 	 * We need to know which is the valid flash bank.  In the event
   13763 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13764 	 * managing flash_bank. So it cannot be trusted and needs
   13765 	 * to be updated with each read.
   13766 	 */
   13767 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13768 	if (rv) {
   13769 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13770 			device_xname(sc->sc_dev)));
   13771 		flash_bank = 0;
   13772 	}
   13773 
   13774 	/*
   13775 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13776 	 * size
   13777 	 */
   13778 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13779 
   13780 	for (i = 0; i < words; i++) {
   13781 		/* The NVM part needs a byte offset, hence * 2 */
   13782 		act_offset = bank_offset + ((offset + i) * 2);
   13783 		/* but we must read dword aligned, so mask ... */
   13784 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13785 		if (rv) {
   13786 			aprint_error_dev(sc->sc_dev,
   13787 			    "%s: failed to read NVM\n", __func__);
   13788 			break;
   13789 		}
   13790 		/* ... and pick out low or high word */
   13791 		if ((act_offset & 0x2) == 0)
   13792 			data[i] = (uint16_t)(dword & 0xFFFF);
   13793 		else
   13794 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13795 	}
   13796 
   13797 	sc->nvm.release(sc);
   13798 	return rv;
   13799 }
   13800 
   13801 /* iNVM */
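/*
 * The iNVM (integrated NVM, used on I210/I211 when no external flash is
 * present) is an array of 32-bit words.  Each dword starts a record:
 * word-autoload records carry an 8-bit word address plus 16 bits of
 * data for that word, while CSR-autoload and RSA-key records are
 * followed by a fixed number of data dwords that must be skipped.
 */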
   13802 
   13803 static int
   13804 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13805 {
   13806 	int32_t	 rv = 0;
   13807 	uint32_t invm_dword;
   13808 	uint16_t i;
   13809 	uint8_t record_type, word_address;
   13810 
   13811 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13812 		device_xname(sc->sc_dev), __func__));
   13813 
   13814 	for (i = 0; i < INVM_SIZE; i++) {
   13815 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13816 		/* Get record type */
   13817 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13818 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13819 			break;
   13820 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13821 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13822 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13823 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13824 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13825 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13826 			if (word_address == address) {
   13827 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13828 				rv = 0;
   13829 				break;
   13830 			}
   13831 		}
   13832 	}
   13833 
   13834 	return rv;
   13835 }
   13836 
   13837 static int
   13838 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13839 {
   13840 	int rv = 0;
   13841 	int i;
   13842 
   13843 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13844 		device_xname(sc->sc_dev), __func__));
   13845 
   13846 	if (sc->nvm.acquire(sc) != 0)
   13847 		return -1;
   13848 
   13849 	for (i = 0; i < words; i++) {
   13850 		switch (offset + i) {
   13851 		case NVM_OFF_MACADDR:
   13852 		case NVM_OFF_MACADDR1:
   13853 		case NVM_OFF_MACADDR2:
   13854 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13855 			if (rv != 0) {
   13856 				data[i] = 0xffff;
   13857 				rv = -1;
   13858 			}
   13859 			break;
   13860 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13861 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13862 			if (rv != 0) {
   13863 				*data = INVM_DEFAULT_AL;
   13864 				rv = 0;
   13865 			}
   13866 			break;
   13867 		case NVM_OFF_CFG2:
   13868 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13869 			if (rv != 0) {
   13870 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13871 				rv = 0;
   13872 			}
   13873 			break;
   13874 		case NVM_OFF_CFG4:
   13875 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13876 			if (rv != 0) {
   13877 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13878 				rv = 0;
   13879 			}
   13880 			break;
   13881 		case NVM_OFF_LED_1_CFG:
   13882 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13883 			if (rv != 0) {
   13884 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13885 				rv = 0;
   13886 			}
   13887 			break;
   13888 		case NVM_OFF_LED_0_2_CFG:
   13889 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13890 			if (rv != 0) {
   13891 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13892 				rv = 0;
   13893 			}
   13894 			break;
   13895 		case NVM_OFF_ID_LED_SETTINGS:
   13896 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13897 			if (rv != 0) {
   13898 				*data = ID_LED_RESERVED_FFFF;
   13899 				rv = 0;
   13900 			}
   13901 			break;
   13902 		default:
   13903 			DPRINTF(sc, WM_DEBUG_NVM,
   13904 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13905 			*data = NVM_RESERVED_WORD;
   13906 			break;
   13907 		}
   13908 	}
   13909 
   13910 	sc->nvm.release(sc);
   13911 	return rv;
   13912 }
   13913 
/* Locking, NVM type detection, checksum validation, version and read */
   13915 
   13916 static int
   13917 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13918 {
   13919 	uint32_t eecd = 0;
   13920 
   13921 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13922 	    || sc->sc_type == WM_T_82583) {
   13923 		eecd = CSR_READ(sc, WMREG_EECD);
   13924 
   13925 		/* Isolate bits 15 & 16 */
   13926 		eecd = ((eecd >> 15) & 0x03);
   13927 
   13928 		/* If both bits are set, device is Flash type */
   13929 		if (eecd == 0x03)
   13930 			return 0;
   13931 	}
   13932 	return 1;
   13933 }
   13934 
   13935 static int
   13936 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13937 {
   13938 	uint32_t eec;
   13939 
   13940 	eec = CSR_READ(sc, WMREG_EEC);
   13941 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13942 		return 1;
   13943 
   13944 	return 0;
   13945 }
   13946 
   13947 /*
   13948  * wm_nvm_validate_checksum
   13949  *
 * The checksum is defined so that the sum of the first 64 16-bit
 * words equals NVM_CHECKSUM.
   13951  */
   13952 static int
   13953 wm_nvm_validate_checksum(struct wm_softc *sc)
   13954 {
   13955 	uint16_t checksum;
   13956 	uint16_t eeprom_data;
   13957 #ifdef WM_DEBUG
   13958 	uint16_t csum_wordaddr, valid_checksum;
   13959 #endif
   13960 	int i;
   13961 
   13962 	checksum = 0;
   13963 
   13964 	/* Don't check for I211 */
   13965 	if (sc->sc_type == WM_T_I211)
   13966 		return 0;
   13967 
   13968 #ifdef WM_DEBUG
   13969 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13970 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13971 		csum_wordaddr = NVM_OFF_COMPAT;
   13972 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13973 	} else {
   13974 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13975 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13976 	}
   13977 
   13978 	/* Dump EEPROM image for debug */
   13979 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13980 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13981 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13982 		/* XXX PCH_SPT? */
   13983 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13984 		if ((eeprom_data & valid_checksum) == 0)
   13985 			DPRINTF(sc, WM_DEBUG_NVM,
   13986 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13987 				device_xname(sc->sc_dev), eeprom_data,
   13988 				    valid_checksum));
   13989 	}
   13990 
   13991 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   13992 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13993 		for (i = 0; i < NVM_SIZE; i++) {
   13994 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13995 				printf("XXXX ");
   13996 			else
   13997 				printf("%04hx ", eeprom_data);
   13998 			if (i % 8 == 7)
   13999 				printf("\n");
   14000 		}
   14001 	}
   14002 
   14003 #endif /* WM_DEBUG */
   14004 
   14005 	for (i = 0; i < NVM_SIZE; i++) {
   14006 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14007 			return 1;
   14008 		checksum += eeprom_data;
   14009 	}
   14010 
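	/*
	 * Note that a checksum mismatch is deliberately not fatal here;
	 * it is only reported when the driver is built with WM_DEBUG.
	 */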
   14011 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14012 #ifdef WM_DEBUG
   14013 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14014 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14015 #endif
   14016 	}
   14017 
   14018 	return 0;
   14019 }
   14020 
   14021 static void
   14022 wm_nvm_version_invm(struct wm_softc *sc)
   14023 {
   14024 	uint32_t dword;
   14025 
   14026 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes.
   14029 	 * Perhaps it's not perfect though...
   14030 	 *
   14031 	 * Example:
   14032 	 *
   14033 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14034 	 */
   14035 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14036 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14037 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14038 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14039 }
   14040 
   14041 static void
   14042 wm_nvm_version(struct wm_softc *sc)
   14043 {
   14044 	uint16_t major, minor, build, patch;
   14045 	uint16_t uid0, uid1;
   14046 	uint16_t nvm_data;
   14047 	uint16_t off;
   14048 	bool check_version = false;
   14049 	bool check_optionrom = false;
   14050 	bool have_build = false;
   14051 	bool have_uid = true;
   14052 
   14053 	/*
   14054 	 * Version format:
   14055 	 *
   14056 	 * XYYZ
   14057 	 * X0YZ
   14058 	 * X0YY
   14059 	 *
   14060 	 * Example:
   14061 	 *
   14062 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14063 	 *	82571	0x50a6	5.10.6?
   14064 	 *	82572	0x506a	5.6.10?
   14065 	 *	82572EI	0x5069	5.6.9?
   14066 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14067 	 *		0x2013	2.1.3?
   14068 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14069 	 * ICH8+82567	0x0040	0.4.0?
   14070 	 * ICH9+82566	0x1040	1.4.0?
   14071 	 *ICH10+82567	0x0043	0.4.3?
   14072 	 *  PCH+82577	0x00c1	0.12.1?
   14073 	 * PCH2+82579	0x00d3	0.13.3?
   14074 	 *		0x00d4	0.13.4?
   14075 	 *  LPT+I218	0x0023	0.2.3?
   14076 	 *  SPT+I219	0x0084	0.8.4?
   14077 	 *  CNP+I219	0x0054	0.5.4?
   14078 	 */
   14079 
   14080 	/*
   14081 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI EEPROM of only 64
	 * words.  I've never seen real 82574 hardware with such a small
	 * SPI ROM.
   14084 	 */
   14085 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14086 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14087 		have_uid = false;
   14088 
   14089 	switch (sc->sc_type) {
   14090 	case WM_T_82571:
   14091 	case WM_T_82572:
   14092 	case WM_T_82574:
   14093 	case WM_T_82583:
   14094 		check_version = true;
   14095 		check_optionrom = true;
   14096 		have_build = true;
   14097 		break;
   14098 	case WM_T_ICH8:
   14099 	case WM_T_ICH9:
   14100 	case WM_T_ICH10:
   14101 	case WM_T_PCH:
   14102 	case WM_T_PCH2:
   14103 	case WM_T_PCH_LPT:
   14104 	case WM_T_PCH_SPT:
   14105 	case WM_T_PCH_CNP:
   14106 		check_version = true;
   14107 		have_build = true;
   14108 		have_uid = false;
   14109 		break;
   14110 	case WM_T_82575:
   14111 	case WM_T_82576:
   14112 	case WM_T_82580:
   14113 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14114 			check_version = true;
   14115 		break;
   14116 	case WM_T_I211:
   14117 		wm_nvm_version_invm(sc);
   14118 		have_uid = false;
   14119 		goto printver;
   14120 	case WM_T_I210:
   14121 		if (!wm_nvm_flash_presence_i210(sc)) {
   14122 			wm_nvm_version_invm(sc);
   14123 			have_uid = false;
   14124 			goto printver;
   14125 		}
   14126 		/* FALLTHROUGH */
   14127 	case WM_T_I350:
   14128 	case WM_T_I354:
   14129 		check_version = true;
   14130 		check_optionrom = true;
   14131 		break;
   14132 	default:
   14133 		return;
   14134 	}
   14135 	if (check_version
   14136 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14137 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14138 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14139 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14140 			build = nvm_data & NVM_BUILD_MASK;
   14141 			have_build = true;
   14142 		} else
   14143 			minor = nvm_data & 0x00ff;
   14144 
		/* Convert the BCD-encoded minor to decimal */
   14146 		minor = (minor / 16) * 10 + (minor % 16);
   14147 		sc->sc_nvm_ver_major = major;
   14148 		sc->sc_nvm_ver_minor = minor;
   14149 
   14150 printver:
   14151 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14152 		    sc->sc_nvm_ver_minor);
   14153 		if (have_build) {
   14154 			sc->sc_nvm_ver_build = build;
   14155 			aprint_verbose(".%d", build);
   14156 		}
   14157 	}
   14158 
	/* Assume the Option ROM area is above NVM_SIZE */
   14160 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14161 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14162 		/* Option ROM Version */
   14163 		if ((off != 0x0000) && (off != 0xffff)) {
   14164 			int rv;
   14165 
   14166 			off += NVM_COMBO_VER_OFF;
   14167 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14168 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14169 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14170 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14171 				/* 16bits */
   14172 				major = uid0 >> 8;
   14173 				build = (uid0 << 8) | (uid1 >> 8);
   14174 				patch = uid1 & 0x00ff;
   14175 				aprint_verbose(", option ROM Version %d.%d.%d",
   14176 				    major, build, patch);
   14177 			}
   14178 		}
   14179 	}
   14180 
   14181 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14182 		aprint_verbose(", Image Unique ID %08x",
   14183 		    ((uint32_t)uid1 << 16) | uid0);
   14184 }
   14185 
   14186 /*
   14187  * wm_nvm_read:
   14188  *
   14189  *	Read data from the serial EEPROM.
   14190  */
   14191 static int
   14192 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14193 {
   14194 	int rv;
   14195 
   14196 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14197 		device_xname(sc->sc_dev), __func__));
   14198 
   14199 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14200 		return -1;
   14201 
   14202 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14203 
   14204 	return rv;
   14205 }
   14206 
   14207 /*
   14208  * Hardware semaphores.
 * Very complex...
   14210  */
   14211 
   14212 static int
   14213 wm_get_null(struct wm_softc *sc)
   14214 {
   14215 
   14216 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14217 		device_xname(sc->sc_dev), __func__));
   14218 	return 0;
   14219 }
   14220 
   14221 static void
   14222 wm_put_null(struct wm_softc *sc)
   14223 {
   14224 
   14225 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14226 		device_xname(sc->sc_dev), __func__));
   14227 	return;
   14228 }
   14229 
   14230 static int
   14231 wm_get_eecd(struct wm_softc *sc)
   14232 {
   14233 	uint32_t reg;
   14234 	int x;
   14235 
   14236 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14237 		device_xname(sc->sc_dev), __func__));
   14238 
   14239 	reg = CSR_READ(sc, WMREG_EECD);
   14240 
   14241 	/* Request EEPROM access. */
   14242 	reg |= EECD_EE_REQ;
   14243 	CSR_WRITE(sc, WMREG_EECD, reg);
   14244 
	/* ... and wait for it to be granted. */
   14246 	for (x = 0; x < 1000; x++) {
   14247 		reg = CSR_READ(sc, WMREG_EECD);
   14248 		if (reg & EECD_EE_GNT)
   14249 			break;
   14250 		delay(5);
   14251 	}
   14252 	if ((reg & EECD_EE_GNT) == 0) {
   14253 		aprint_error_dev(sc->sc_dev,
   14254 		    "could not acquire EEPROM GNT\n");
   14255 		reg &= ~EECD_EE_REQ;
   14256 		CSR_WRITE(sc, WMREG_EECD, reg);
   14257 		return -1;
   14258 	}
   14259 
   14260 	return 0;
   14261 }
   14262 
   14263 static void
   14264 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14265 {
   14266 
   14267 	*eecd |= EECD_SK;
   14268 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14269 	CSR_WRITE_FLUSH(sc);
   14270 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14271 		delay(1);
   14272 	else
   14273 		delay(50);
   14274 }
   14275 
   14276 static void
   14277 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14278 {
   14279 
   14280 	*eecd &= ~EECD_SK;
   14281 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14282 	CSR_WRITE_FLUSH(sc);
   14283 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14284 		delay(1);
   14285 	else
   14286 		delay(50);
   14287 }
   14288 
   14289 static void
   14290 wm_put_eecd(struct wm_softc *sc)
   14291 {
   14292 	uint32_t reg;
   14293 
   14294 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14295 		device_xname(sc->sc_dev), __func__));
   14296 
   14297 	/* Stop nvm */
   14298 	reg = CSR_READ(sc, WMREG_EECD);
   14299 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14300 		/* Pull CS high */
   14301 		reg |= EECD_CS;
   14302 		wm_nvm_eec_clock_lower(sc, &reg);
   14303 	} else {
   14304 		/* CS on Microwire is active-high */
   14305 		reg &= ~(EECD_CS | EECD_DI);
   14306 		CSR_WRITE(sc, WMREG_EECD, reg);
   14307 		wm_nvm_eec_clock_raise(sc, &reg);
   14308 		wm_nvm_eec_clock_lower(sc, &reg);
   14309 	}
   14310 
   14311 	reg = CSR_READ(sc, WMREG_EECD);
   14312 	reg &= ~EECD_EE_REQ;
   14313 	CSR_WRITE(sc, WMREG_EECD, reg);
   14314 
   14315 	return;
   14316 }
   14317 
   14318 /*
   14319  * Get hardware semaphore.
   14320  * Same as e1000_get_hw_semaphore_generic()
   14321  */
   14322 static int
   14323 wm_get_swsm_semaphore(struct wm_softc *sc)
   14324 {
   14325 	int32_t timeout;
   14326 	uint32_t swsm;
   14327 
   14328 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14329 		device_xname(sc->sc_dev), __func__));
   14330 	KASSERT(sc->sc_nvm_wordsize > 0);
   14331 
   14332 retry:
   14333 	/* Get the SW semaphore. */
   14334 	timeout = sc->sc_nvm_wordsize + 1;
   14335 	while (timeout) {
   14336 		swsm = CSR_READ(sc, WMREG_SWSM);
   14337 
   14338 		if ((swsm & SWSM_SMBI) == 0)
   14339 			break;
   14340 
   14341 		delay(50);
   14342 		timeout--;
   14343 	}
   14344 
   14345 	if (timeout == 0) {
   14346 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14347 			/*
   14348 			 * In rare circumstances, the SW semaphore may already
   14349 			 * be held unintentionally. Clear the semaphore once
   14350 			 * before giving up.
   14351 			 */
   14352 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14353 			wm_put_swsm_semaphore(sc);
   14354 			goto retry;
   14355 		}
   14356 		aprint_error_dev(sc->sc_dev,
   14357 		    "could not acquire SWSM SMBI\n");
   14358 		return 1;
   14359 	}
   14360 
   14361 	/* Get the FW semaphore. */
   14362 	timeout = sc->sc_nvm_wordsize + 1;
   14363 	while (timeout) {
   14364 		swsm = CSR_READ(sc, WMREG_SWSM);
   14365 		swsm |= SWSM_SWESMBI;
   14366 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14367 		/* If we managed to set the bit we got the semaphore. */
   14368 		swsm = CSR_READ(sc, WMREG_SWSM);
   14369 		if (swsm & SWSM_SWESMBI)
   14370 			break;
   14371 
   14372 		delay(50);
   14373 		timeout--;
   14374 	}
   14375 
   14376 	if (timeout == 0) {
   14377 		aprint_error_dev(sc->sc_dev,
   14378 		    "could not acquire SWSM SWESMBI\n");
   14379 		/* Release semaphores */
   14380 		wm_put_swsm_semaphore(sc);
   14381 		return 1;
   14382 	}
   14383 	return 0;
   14384 }
   14385 
   14386 /*
   14387  * Put hardware semaphore.
   14388  * Same as e1000_put_hw_semaphore_generic()
   14389  */
   14390 static void
   14391 wm_put_swsm_semaphore(struct wm_softc *sc)
   14392 {
   14393 	uint32_t swsm;
   14394 
   14395 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14396 		device_xname(sc->sc_dev), __func__));
   14397 
   14398 	swsm = CSR_READ(sc, WMREG_SWSM);
   14399 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14400 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14401 }
   14402 
   14403 /*
   14404  * Get SW/FW semaphore.
   14405  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
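 *
 * The mask names the resource bits: shifted by SWFW_SOFT_SHIFT it is
 * the software claim, shifted by SWFW_FIRM_SHIFT the firmware claim.
 * The resource is taken only when neither side currently holds it.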
   14406  */
   14407 static int
   14408 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14409 {
   14410 	uint32_t swfw_sync;
   14411 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14412 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14413 	int timeout;
   14414 
   14415 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14416 		device_xname(sc->sc_dev), __func__));
   14417 
   14418 	if (sc->sc_type == WM_T_80003)
   14419 		timeout = 50;
   14420 	else
   14421 		timeout = 200;
   14422 
   14423 	while (timeout) {
   14424 		if (wm_get_swsm_semaphore(sc)) {
   14425 			aprint_error_dev(sc->sc_dev,
   14426 			    "%s: failed to get semaphore\n",
   14427 			    __func__);
   14428 			return 1;
   14429 		}
   14430 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14431 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14432 			swfw_sync |= swmask;
   14433 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14434 			wm_put_swsm_semaphore(sc);
   14435 			return 0;
   14436 		}
   14437 		wm_put_swsm_semaphore(sc);
   14438 		delay(5000);
   14439 		timeout--;
   14440 	}
   14441 	device_printf(sc->sc_dev,
   14442 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14443 	    mask, swfw_sync);
   14444 	return 1;
   14445 }
   14446 
   14447 static void
   14448 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14449 {
   14450 	uint32_t swfw_sync;
   14451 
   14452 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14453 		device_xname(sc->sc_dev), __func__));
   14454 
   14455 	while (wm_get_swsm_semaphore(sc) != 0)
   14456 		continue;
   14457 
   14458 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14459 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14460 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14461 
   14462 	wm_put_swsm_semaphore(sc);
   14463 }
   14464 
   14465 static int
   14466 wm_get_nvm_80003(struct wm_softc *sc)
   14467 {
   14468 	int rv;
   14469 
   14470 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14471 		device_xname(sc->sc_dev), __func__));
   14472 
   14473 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14474 		aprint_error_dev(sc->sc_dev,
   14475 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14476 		return rv;
   14477 	}
   14478 
   14479 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14480 	    && (rv = wm_get_eecd(sc)) != 0) {
   14481 		aprint_error_dev(sc->sc_dev,
   14482 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14483 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14484 		return rv;
   14485 	}
   14486 
   14487 	return 0;
   14488 }
   14489 
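/*
 * Release the semaphores taken by wm_get_nvm_80003().
 */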
   14490 static void
   14491 wm_put_nvm_80003(struct wm_softc *sc)
   14492 {
   14493 
   14494 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14495 		device_xname(sc->sc_dev), __func__));
   14496 
   14497 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14498 		wm_put_eecd(sc);
   14499 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14500 }
   14501 
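/*
 * Get the semaphores required for NVM access on 8257x devices: the SWSM
 * semaphore, plus the EECD lock on chips other than the 82573 when
 * WM_F_LOCK_EECD is set.
 */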
   14502 static int
   14503 wm_get_nvm_82571(struct wm_softc *sc)
   14504 {
   14505 	int rv;
   14506 
   14507 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14508 		device_xname(sc->sc_dev), __func__));
   14509 
   14510 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14511 		return rv;
   14512 
   14513 	switch (sc->sc_type) {
   14514 	case WM_T_82573:
   14515 		break;
   14516 	default:
   14517 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14518 			rv = wm_get_eecd(sc);
   14519 		break;
   14520 	}
   14521 
   14522 	if (rv != 0) {
   14523 		aprint_error_dev(sc->sc_dev,
   14524 		    "%s: failed to get semaphore\n",
   14525 		    __func__);
   14526 		wm_put_swsm_semaphore(sc);
   14527 	}
   14528 
   14529 	return rv;
   14530 }
   14531 
   14532 static void
   14533 wm_put_nvm_82571(struct wm_softc *sc)
   14534 {
   14535 
   14536 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14537 		device_xname(sc->sc_dev), __func__));
   14538 
   14539 	switch (sc->sc_type) {
   14540 	case WM_T_82573:
   14541 		break;
   14542 	default:
   14543 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14544 			wm_put_eecd(sc);
   14545 		break;
   14546 	}
   14547 
   14548 	wm_put_swsm_semaphore(sc);
   14549 }
   14550 
   14551 static int
   14552 wm_get_phy_82575(struct wm_softc *sc)
   14553 {
   14554 
   14555 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14556 		device_xname(sc->sc_dev), __func__));
   14557 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14558 }
   14559 
   14560 static void
   14561 wm_put_phy_82575(struct wm_softc *sc)
   14562 {
   14563 
   14564 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14565 		device_xname(sc->sc_dev), __func__));
   14566 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14567 }
   14568 
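/*
 * Get the SW/FW/HW semaphore by setting EXTCNFCTR.MDIO_SW_OWNERSHIP.
 * The ICH PHY mutex (used for both PHY and NVM) is held until the
 * semaphore is released with wm_put_swfwhw_semaphore().
 */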
   14569 static int
   14570 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14571 {
   14572 	uint32_t ext_ctrl;
    14573 	int timeout;
   14574 
   14575 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14576 		device_xname(sc->sc_dev), __func__));
   14577 
   14578 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14579 	for (timeout = 0; timeout < 200; timeout++) {
   14580 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14581 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14582 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14583 
   14584 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14585 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14586 			return 0;
   14587 		delay(5000);
   14588 	}
   14589 	device_printf(sc->sc_dev,
   14590 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14591 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14592 	return 1;
   14593 }
   14594 
   14595 static void
   14596 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14597 {
   14598 	uint32_t ext_ctrl;
   14599 
   14600 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14601 		device_xname(sc->sc_dev), __func__));
   14602 
   14603 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14604 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14605 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14606 
   14607 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14608 }
   14609 
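/*
 * Get the software flag used on ICH8 and newer: wait for the MDIO
 * ownership bit to become free, claim it, and verify the claim took
 * effect.
 */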
   14610 static int
   14611 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14612 {
   14613 	uint32_t ext_ctrl;
   14614 	int timeout;
   14615 
   14616 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14617 		device_xname(sc->sc_dev), __func__));
   14618 	mutex_enter(sc->sc_ich_phymtx);
   14619 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14620 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14621 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14622 			break;
   14623 		delay(1000);
   14624 	}
   14625 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14626 		device_printf(sc->sc_dev,
   14627 		    "SW has already locked the resource\n");
   14628 		goto out;
   14629 	}
   14630 
   14631 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14632 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14633 	for (timeout = 0; timeout < 1000; timeout++) {
   14634 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14635 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14636 			break;
   14637 		delay(1000);
   14638 	}
   14639 	if (timeout >= 1000) {
   14640 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14641 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14642 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14643 		goto out;
   14644 	}
   14645 	return 0;
   14646 
   14647 out:
   14648 	mutex_exit(sc->sc_ich_phymtx);
   14649 	return 1;
   14650 }
   14651 
   14652 static void
   14653 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14654 {
   14655 	uint32_t ext_ctrl;
   14656 
   14657 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14658 		device_xname(sc->sc_dev), __func__));
   14659 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14660 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14661 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14662 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14663 	} else {
   14664 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14665 	}
   14666 
   14667 	mutex_exit(sc->sc_ich_phymtx);
   14668 }
   14669 
   14670 static int
   14671 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14672 {
   14673 
   14674 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14675 		device_xname(sc->sc_dev), __func__));
   14676 	mutex_enter(sc->sc_ich_nvmmtx);
   14677 
   14678 	return 0;
   14679 }
   14680 
   14681 static void
   14682 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14683 {
   14684 
   14685 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14686 		device_xname(sc->sc_dev), __func__));
   14687 	mutex_exit(sc->sc_ich_nvmmtx);
   14688 }
   14689 
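/*
 * Get the hardware semaphore (EXTCNFCTR.MDIO_SW_OWNERSHIP) on 82573 class
 * devices, retrying until WM_MDIO_OWNERSHIP_TIMEOUT is exceeded.
 */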
   14690 static int
   14691 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14692 {
   14693 	int i = 0;
   14694 	uint32_t reg;
   14695 
   14696 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14697 		device_xname(sc->sc_dev), __func__));
   14698 
   14699 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14700 	do {
   14701 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14702 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14703 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14704 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14705 			break;
   14706 		delay(2*1000);
   14707 		i++;
   14708 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14709 
   14710 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14711 		wm_put_hw_semaphore_82573(sc);
   14712 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14713 		    device_xname(sc->sc_dev));
   14714 		return -1;
   14715 	}
   14716 
   14717 	return 0;
   14718 }
   14719 
   14720 static void
   14721 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14722 {
   14723 	uint32_t reg;
   14724 
   14725 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14726 		device_xname(sc->sc_dev), __func__));
   14727 
   14728 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14729 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14730 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14731 }
   14732 
   14733 /*
   14734  * Management mode and power management related subroutines.
   14735  * BMC, AMT, suspend/resume and EEE.
   14736  */
   14737 
   14738 #ifdef WM_WOL
   14739 static int
   14740 wm_check_mng_mode(struct wm_softc *sc)
   14741 {
   14742 	int rv;
   14743 
   14744 	switch (sc->sc_type) {
   14745 	case WM_T_ICH8:
   14746 	case WM_T_ICH9:
   14747 	case WM_T_ICH10:
   14748 	case WM_T_PCH:
   14749 	case WM_T_PCH2:
   14750 	case WM_T_PCH_LPT:
   14751 	case WM_T_PCH_SPT:
   14752 	case WM_T_PCH_CNP:
   14753 		rv = wm_check_mng_mode_ich8lan(sc);
   14754 		break;
   14755 	case WM_T_82574:
   14756 	case WM_T_82583:
   14757 		rv = wm_check_mng_mode_82574(sc);
   14758 		break;
   14759 	case WM_T_82571:
   14760 	case WM_T_82572:
   14761 	case WM_T_82573:
   14762 	case WM_T_80003:
   14763 		rv = wm_check_mng_mode_generic(sc);
   14764 		break;
   14765 	default:
    14766 		/* Nothing to do */
   14767 		rv = 0;
   14768 		break;
   14769 	}
   14770 
   14771 	return rv;
   14772 }
   14773 
   14774 static int
   14775 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14776 {
   14777 	uint32_t fwsm;
   14778 
   14779 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14780 
   14781 	if (((fwsm & FWSM_FW_VALID) != 0)
   14782 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14783 		return 1;
   14784 
   14785 	return 0;
   14786 }
   14787 
   14788 static int
   14789 wm_check_mng_mode_82574(struct wm_softc *sc)
   14790 {
   14791 	uint16_t data;
   14792 
   14793 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14794 
   14795 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14796 		return 1;
   14797 
   14798 	return 0;
   14799 }
   14800 
   14801 static int
   14802 wm_check_mng_mode_generic(struct wm_softc *sc)
   14803 {
   14804 	uint32_t fwsm;
   14805 
   14806 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14807 
   14808 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14809 		return 1;
   14810 
   14811 	return 0;
   14812 }
   14813 #endif /* WM_WOL */
   14814 
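/*
 * Check whether management packets will be passed through to the host.
 * Returns nonzero when ASF firmware is present, TCO packet reception is
 * enabled and the firmware mode allows manageability pass through.
 */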
   14815 static int
   14816 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14817 {
   14818 	uint32_t manc, fwsm, factps;
   14819 
   14820 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14821 		return 0;
   14822 
   14823 	manc = CSR_READ(sc, WMREG_MANC);
   14824 
   14825 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14826 		device_xname(sc->sc_dev), manc));
   14827 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14828 		return 0;
   14829 
   14830 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14831 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14832 		factps = CSR_READ(sc, WMREG_FACTPS);
   14833 		if (((factps & FACTPS_MNGCG) == 0)
   14834 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14835 			return 1;
    14836 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14837 		uint16_t data;
   14838 
   14839 		factps = CSR_READ(sc, WMREG_FACTPS);
   14840 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14841 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14842 			device_xname(sc->sc_dev), factps, data));
   14843 		if (((factps & FACTPS_MNGCG) == 0)
   14844 		    && ((data & NVM_CFG2_MNGM_MASK)
   14845 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14846 			return 1;
   14847 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14848 	    && ((manc & MANC_ASF_EN) == 0))
   14849 		return 1;
   14850 
   14851 	return 0;
   14852 }
   14853 
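/*
 * Check whether a PHY reset is currently blocked, e.g. because firmware
 * is using the PHY.
 */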
   14854 static bool
   14855 wm_phy_resetisblocked(struct wm_softc *sc)
   14856 {
   14857 	bool blocked = false;
   14858 	uint32_t reg;
   14859 	int i = 0;
   14860 
   14861 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14862 		device_xname(sc->sc_dev), __func__));
   14863 
   14864 	switch (sc->sc_type) {
   14865 	case WM_T_ICH8:
   14866 	case WM_T_ICH9:
   14867 	case WM_T_ICH10:
   14868 	case WM_T_PCH:
   14869 	case WM_T_PCH2:
   14870 	case WM_T_PCH_LPT:
   14871 	case WM_T_PCH_SPT:
   14872 	case WM_T_PCH_CNP:
   14873 		do {
   14874 			reg = CSR_READ(sc, WMREG_FWSM);
   14875 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14876 				blocked = true;
   14877 				delay(10*1000);
   14878 				continue;
   14879 			}
   14880 			blocked = false;
   14881 		} while (blocked && (i++ < 30));
   14882 		return blocked;
   14883 		break;
   14884 	case WM_T_82571:
   14885 	case WM_T_82572:
   14886 	case WM_T_82573:
   14887 	case WM_T_82574:
   14888 	case WM_T_82583:
   14889 	case WM_T_80003:
   14890 		reg = CSR_READ(sc, WMREG_MANC);
   14891 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14892 			return true;
   14893 		else
   14894 			return false;
   14895 		break;
   14896 	default:
   14897 		/* No problem */
   14898 		break;
   14899 	}
   14900 
   14901 	return false;
   14902 }
   14903 
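/*
 * Let the firmware know that the driver has taken over the device by
 * setting the DRV_LOAD bit.
 */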
   14904 static void
   14905 wm_get_hw_control(struct wm_softc *sc)
   14906 {
   14907 	uint32_t reg;
   14908 
   14909 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14910 		device_xname(sc->sc_dev), __func__));
   14911 
   14912 	if (sc->sc_type == WM_T_82573) {
   14913 		reg = CSR_READ(sc, WMREG_SWSM);
   14914 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14915 	} else if (sc->sc_type >= WM_T_82571) {
   14916 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14917 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14918 	}
   14919 }
   14920 
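/*
 * Let the firmware know that the driver is no longer in control of the
 * device by clearing the DRV_LOAD bit.
 */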
   14921 static void
   14922 wm_release_hw_control(struct wm_softc *sc)
   14923 {
   14924 	uint32_t reg;
   14925 
   14926 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14927 		device_xname(sc->sc_dev), __func__));
   14928 
   14929 	if (sc->sc_type == WM_T_82573) {
   14930 		reg = CSR_READ(sc, WMREG_SWSM);
   14931 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14932 	} else if (sc->sc_type >= WM_T_82571) {
   14933 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14934 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14935 	}
   14936 }
   14937 
   14938 static void
   14939 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14940 {
   14941 	uint32_t reg;
   14942 
   14943 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14944 		device_xname(sc->sc_dev), __func__));
   14945 
   14946 	if (sc->sc_type < WM_T_PCH2)
   14947 		return;
   14948 
   14949 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14950 
   14951 	if (gate)
   14952 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14953 	else
   14954 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14955 
   14956 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14957 }
   14958 
   14959 static int
   14960 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14961 {
   14962 	uint32_t fwsm, reg;
   14963 	int rv = 0;
   14964 
   14965 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14966 		device_xname(sc->sc_dev), __func__));
   14967 
   14968 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14969 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14970 
   14971 	/* Disable ULP */
   14972 	wm_ulp_disable(sc);
   14973 
   14974 	/* Acquire PHY semaphore */
   14975 	rv = sc->phy.acquire(sc);
   14976 	if (rv != 0) {
   14977 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   14978 		device_xname(sc->sc_dev), __func__));
   14979 		return -1;
   14980 	}
   14981 
   14982 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14983 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14984 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14985 	 */
   14986 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14987 	switch (sc->sc_type) {
   14988 	case WM_T_PCH_LPT:
   14989 	case WM_T_PCH_SPT:
   14990 	case WM_T_PCH_CNP:
   14991 		if (wm_phy_is_accessible_pchlan(sc))
   14992 			break;
   14993 
   14994 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14995 		 * forcing MAC to SMBus mode first.
   14996 		 */
   14997 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14998 		reg |= CTRL_EXT_FORCE_SMBUS;
   14999 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15000 #if 0
   15001 		/* XXX Isn't this required??? */
   15002 		CSR_WRITE_FLUSH(sc);
   15003 #endif
   15004 		/* Wait 50 milliseconds for MAC to finish any retries
   15005 		 * that it might be trying to perform from previous
   15006 		 * attempts to acknowledge any phy read requests.
   15007 		 */
   15008 		delay(50 * 1000);
   15009 		/* FALLTHROUGH */
   15010 	case WM_T_PCH2:
   15011 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15012 			break;
   15013 		/* FALLTHROUGH */
   15014 	case WM_T_PCH:
   15015 		if (sc->sc_type == WM_T_PCH)
   15016 			if ((fwsm & FWSM_FW_VALID) != 0)
   15017 				break;
   15018 
   15019 		if (wm_phy_resetisblocked(sc) == true) {
   15020 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15021 			break;
   15022 		}
   15023 
   15024 		/* Toggle LANPHYPC Value bit */
   15025 		wm_toggle_lanphypc_pch_lpt(sc);
   15026 
   15027 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15028 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15029 				break;
   15030 
   15031 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15032 			 * so ensure that the MAC is also out of SMBus mode
   15033 			 */
   15034 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15035 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15036 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15037 
   15038 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15039 				break;
   15040 			rv = -1;
   15041 		}
   15042 		break;
   15043 	default:
   15044 		break;
   15045 	}
   15046 
   15047 	/* Release semaphore */
   15048 	sc->phy.release(sc);
   15049 
   15050 	if (rv == 0) {
   15051 		/* Check to see if able to reset PHY.  Print error if not */
   15052 		if (wm_phy_resetisblocked(sc)) {
   15053 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15054 			goto out;
   15055 		}
   15056 
    15057 		/* Reset the PHY before any access to it.  Doing so ensures
   15058 		 * that the PHY is in a known good state before we read/write
   15059 		 * PHY registers.  The generic reset is sufficient here,
   15060 		 * because we haven't determined the PHY type yet.
   15061 		 */
   15062 		if (wm_reset_phy(sc) != 0)
   15063 			goto out;
   15064 
   15065 		/* On a successful reset, possibly need to wait for the PHY
   15066 		 * to quiesce to an accessible state before returning control
   15067 		 * to the calling function.  If the PHY does not quiesce, then
   15068 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
   15069 		 *  the PHY is in.
   15070 		 */
   15071 		if (wm_phy_resetisblocked(sc))
   15072 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15073 	}
   15074 
   15075 out:
   15076 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15077 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15078 		delay(10*1000);
   15079 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15080 	}
   15081 
    15082 	return rv;
   15083 }
   15084 
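/*
 * Configure manageability: disable hardware ARP interception and, on 82571
 * and newer, let management packets on ports 623 and 624 reach the host.
 */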
   15085 static void
   15086 wm_init_manageability(struct wm_softc *sc)
   15087 {
   15088 
   15089 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15090 		device_xname(sc->sc_dev), __func__));
   15091 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15092 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15093 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15094 
   15095 		/* Disable hardware interception of ARP */
   15096 		manc &= ~MANC_ARP_EN;
   15097 
   15098 		/* Enable receiving management packets to the host */
   15099 		if (sc->sc_type >= WM_T_82571) {
   15100 			manc |= MANC_EN_MNG2HOST;
   15101 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15102 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15103 		}
   15104 
   15105 		CSR_WRITE(sc, WMREG_MANC, manc);
   15106 	}
   15107 }
   15108 
   15109 static void
   15110 wm_release_manageability(struct wm_softc *sc)
   15111 {
   15112 
   15113 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15114 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15115 
   15116 		manc |= MANC_ARP_EN;
   15117 		if (sc->sc_type >= WM_T_82571)
   15118 			manc &= ~MANC_EN_MNG2HOST;
   15119 
   15120 		CSR_WRITE(sc, WMREG_MANC, manc);
   15121 	}
   15122 }
   15123 
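/*
 * Determine the wakeup-related capabilities of the chip: AMT, ARC subsystem
 * validity, ASF firmware presence and manageability pass through.
 */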
   15124 static void
   15125 wm_get_wakeup(struct wm_softc *sc)
   15126 {
   15127 
   15128 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15129 	switch (sc->sc_type) {
   15130 	case WM_T_82573:
   15131 	case WM_T_82583:
   15132 		sc->sc_flags |= WM_F_HAS_AMT;
   15133 		/* FALLTHROUGH */
   15134 	case WM_T_80003:
   15135 	case WM_T_82575:
   15136 	case WM_T_82576:
   15137 	case WM_T_82580:
   15138 	case WM_T_I350:
   15139 	case WM_T_I354:
   15140 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15141 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15142 		/* FALLTHROUGH */
   15143 	case WM_T_82541:
   15144 	case WM_T_82541_2:
   15145 	case WM_T_82547:
   15146 	case WM_T_82547_2:
   15147 	case WM_T_82571:
   15148 	case WM_T_82572:
   15149 	case WM_T_82574:
   15150 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15151 		break;
   15152 	case WM_T_ICH8:
   15153 	case WM_T_ICH9:
   15154 	case WM_T_ICH10:
   15155 	case WM_T_PCH:
   15156 	case WM_T_PCH2:
   15157 	case WM_T_PCH_LPT:
   15158 	case WM_T_PCH_SPT:
   15159 	case WM_T_PCH_CNP:
   15160 		sc->sc_flags |= WM_F_HAS_AMT;
   15161 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15162 		break;
   15163 	default:
   15164 		break;
   15165 	}
   15166 
   15167 	/* 1: HAS_MANAGE */
   15168 	if (wm_enable_mng_pass_thru(sc) != 0)
   15169 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15170 
   15171 	/*
    15172 	 * Note that the WOL flags are set after the EEPROM reset handling
    15173 	 * is done.
   15174 	 */
   15175 }
   15176 
   15177 /*
   15178  * Unconfigure Ultra Low Power mode.
   15179  * Only for I217 and newer (see below).
   15180  */
   15181 static int
   15182 wm_ulp_disable(struct wm_softc *sc)
   15183 {
   15184 	uint32_t reg;
   15185 	uint16_t phyreg;
   15186 	int i = 0, rv = 0;
   15187 
   15188 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15189 		device_xname(sc->sc_dev), __func__));
   15190 	/* Exclude old devices */
   15191 	if ((sc->sc_type < WM_T_PCH_LPT)
   15192 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15193 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15194 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15195 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15196 		return 0;
   15197 
   15198 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15199 		/* Request ME un-configure ULP mode in the PHY */
   15200 		reg = CSR_READ(sc, WMREG_H2ME);
   15201 		reg &= ~H2ME_ULP;
   15202 		reg |= H2ME_ENFORCE_SETTINGS;
   15203 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15204 
   15205 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15206 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15207 			if (i++ == 30) {
   15208 				device_printf(sc->sc_dev, "%s timed out\n",
   15209 				    __func__);
   15210 				return -1;
   15211 			}
   15212 			delay(10 * 1000);
   15213 		}
   15214 		reg = CSR_READ(sc, WMREG_H2ME);
   15215 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15216 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15217 
   15218 		return 0;
   15219 	}
   15220 
   15221 	/* Acquire semaphore */
   15222 	rv = sc->phy.acquire(sc);
   15223 	if (rv != 0) {
   15224 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15225 		device_xname(sc->sc_dev), __func__));
   15226 		return -1;
   15227 	}
   15228 
   15229 	/* Toggle LANPHYPC */
   15230 	wm_toggle_lanphypc_pch_lpt(sc);
   15231 
   15232 	/* Unforce SMBus mode in PHY */
   15233 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15234 	if (rv != 0) {
   15235 		uint32_t reg2;
   15236 
   15237 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15238 			__func__);
   15239 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15240 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15241 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15242 		delay(50 * 1000);
   15243 
   15244 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15245 		    &phyreg);
   15246 		if (rv != 0)
   15247 			goto release;
   15248 	}
   15249 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15250 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15251 
   15252 	/* Unforce SMBus mode in MAC */
   15253 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15254 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15255 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15256 
   15257 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15258 	if (rv != 0)
   15259 		goto release;
   15260 	phyreg |= HV_PM_CTRL_K1_ENA;
   15261 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15262 
   15263 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15264 		&phyreg);
   15265 	if (rv != 0)
   15266 		goto release;
   15267 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15268 	    | I218_ULP_CONFIG1_STICKY_ULP
   15269 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15270 	    | I218_ULP_CONFIG1_WOL_HOST
   15271 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15272 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15273 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15274 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15275 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15276 	phyreg |= I218_ULP_CONFIG1_START;
   15277 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15278 
   15279 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15280 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15281 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15282 
   15283 release:
   15284 	/* Release semaphore */
   15285 	sc->phy.release(sc);
   15286 	wm_gmii_reset(sc);
   15287 	delay(50 * 1000);
   15288 
   15289 	return rv;
   15290 }
   15291 
   15292 /* WOL in the newer chipset interfaces (pchlan) */
   15293 static int
   15294 wm_enable_phy_wakeup(struct wm_softc *sc)
   15295 {
   15296 	device_t dev = sc->sc_dev;
   15297 	uint32_t mreg, moff;
   15298 	uint16_t wuce, wuc, wufc, preg;
   15299 	int i, rv;
   15300 
   15301 	KASSERT(sc->sc_type >= WM_T_PCH);
   15302 
   15303 	/* Copy MAC RARs to PHY RARs */
   15304 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15305 
   15306 	/* Activate PHY wakeup */
   15307 	rv = sc->phy.acquire(sc);
   15308 	if (rv != 0) {
   15309 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15310 		    __func__);
   15311 		return rv;
   15312 	}
   15313 
   15314 	/*
   15315 	 * Enable access to PHY wakeup registers.
   15316 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15317 	 */
   15318 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15319 	if (rv != 0) {
   15320 		device_printf(dev,
   15321 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15322 		goto release;
   15323 	}
   15324 
   15325 	/* Copy MAC MTA to PHY MTA */
   15326 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15327 		uint16_t lo, hi;
   15328 
   15329 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15330 		lo = (uint16_t)(mreg & 0xffff);
   15331 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15332 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15333 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15334 	}
   15335 
   15336 	/* Configure PHY Rx Control register */
   15337 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15338 	mreg = CSR_READ(sc, WMREG_RCTL);
   15339 	if (mreg & RCTL_UPE)
   15340 		preg |= BM_RCTL_UPE;
   15341 	if (mreg & RCTL_MPE)
   15342 		preg |= BM_RCTL_MPE;
   15343 	preg &= ~(BM_RCTL_MO_MASK);
   15344 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15345 	if (moff != 0)
   15346 		preg |= moff << BM_RCTL_MO_SHIFT;
   15347 	if (mreg & RCTL_BAM)
   15348 		preg |= BM_RCTL_BAM;
   15349 	if (mreg & RCTL_PMCF)
   15350 		preg |= BM_RCTL_PMCF;
   15351 	mreg = CSR_READ(sc, WMREG_CTRL);
   15352 	if (mreg & CTRL_RFCE)
   15353 		preg |= BM_RCTL_RFCE;
   15354 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15355 
   15356 	wuc = WUC_APME | WUC_PME_EN;
   15357 	wufc = WUFC_MAG;
   15358 	/* Enable PHY wakeup in MAC register */
   15359 	CSR_WRITE(sc, WMREG_WUC,
   15360 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15361 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15362 
   15363 	/* Configure and enable PHY wakeup in PHY registers */
   15364 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15365 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15366 
   15367 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15368 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15369 
   15370 release:
   15371 	sc->phy.release(sc);
   15372 
    15373 	return rv;
   15374 }
   15375 
   15376 /* Power down workaround on D3 */
   15377 static void
   15378 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15379 {
   15380 	uint32_t reg;
   15381 	uint16_t phyreg;
   15382 	int i;
   15383 
   15384 	for (i = 0; i < 2; i++) {
   15385 		/* Disable link */
   15386 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15387 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15388 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15389 
   15390 		/*
   15391 		 * Call gig speed drop workaround on Gig disable before
   15392 		 * accessing any PHY registers
   15393 		 */
   15394 		if (sc->sc_type == WM_T_ICH8)
   15395 			wm_gig_downshift_workaround_ich8lan(sc);
   15396 
   15397 		/* Write VR power-down enable */
   15398 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15399 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15400 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15401 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15402 
   15403 		/* Read it back and test */
   15404 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15405 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15406 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15407 			break;
   15408 
   15409 		/* Issue PHY reset and repeat at most one more time */
   15410 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15411 	}
   15412 }
   15413 
   15414 /*
   15415  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15416  *  @sc: pointer to the HW structure
   15417  *
   15418  *  During S0 to Sx transition, it is possible the link remains at gig
   15419  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15420  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15421  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15422  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15423  *  needs to be written.
   15424  *  Parts that support (and are linked to a partner which support) EEE in
   15425  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15426  *  than 10Mbps w/o EEE.
   15427  */
   15428 static void
   15429 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15430 {
   15431 	device_t dev = sc->sc_dev;
   15432 	struct ethercom *ec = &sc->sc_ethercom;
   15433 	uint32_t phy_ctrl;
   15434 	int rv;
   15435 
   15436 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15437 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15438 
   15439 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15440 
   15441 	if (sc->sc_phytype == WMPHY_I217) {
   15442 		uint16_t devid = sc->sc_pcidevid;
   15443 
   15444 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15445 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15446 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15447 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15448 		    (sc->sc_type >= WM_T_PCH_SPT))
   15449 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15450 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15451 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15452 
   15453 		if (sc->phy.acquire(sc) != 0)
   15454 			goto out;
   15455 
   15456 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15457 			uint16_t eee_advert;
   15458 
   15459 			rv = wm_read_emi_reg_locked(dev,
   15460 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15461 			if (rv)
   15462 				goto release;
   15463 
   15464 			/*
   15465 			 * Disable LPLU if both link partners support 100BaseT
   15466 			 * EEE and 100Full is advertised on both ends of the
   15467 			 * link, and enable Auto Enable LPI since there will
   15468 			 * be no driver to enable LPI while in Sx.
   15469 			 */
   15470 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15471 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15472 				uint16_t anar, phy_reg;
   15473 
   15474 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15475 				    &anar);
   15476 				if (anar & ANAR_TX_FD) {
   15477 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15478 					    PHY_CTRL_NOND0A_LPLU);
   15479 
   15480 					/* Set Auto Enable LPI after link up */
   15481 					sc->phy.readreg_locked(dev, 2,
   15482 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15483 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15484 					sc->phy.writereg_locked(dev, 2,
   15485 					    I217_LPI_GPIO_CTRL, phy_reg);
   15486 				}
   15487 			}
   15488 		}
   15489 
   15490 		/*
   15491 		 * For i217 Intel Rapid Start Technology support,
   15492 		 * when the system is going into Sx and no manageability engine
   15493 		 * is present, the driver must configure proxy to reset only on
   15494 		 * power good.	LPI (Low Power Idle) state must also reset only
   15495 		 * on power good, as well as the MTA (Multicast table array).
   15496 		 * The SMBus release must also be disabled on LCD reset.
   15497 		 */
   15498 
   15499 		/*
   15500 		 * Enable MTA to reset for Intel Rapid Start Technology
   15501 		 * Support
   15502 		 */
   15503 
   15504 release:
   15505 		sc->phy.release(sc);
   15506 	}
   15507 out:
   15508 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15509 
   15510 	if (sc->sc_type == WM_T_ICH8)
   15511 		wm_gig_downshift_workaround_ich8lan(sc);
   15512 
   15513 	if (sc->sc_type >= WM_T_PCH) {
   15514 		wm_oem_bits_config_ich8lan(sc, false);
   15515 
   15516 		/* Reset PHY to activate OEM bits on 82577/8 */
   15517 		if (sc->sc_type == WM_T_PCH)
   15518 			wm_reset_phy(sc);
   15519 
   15520 		if (sc->phy.acquire(sc) != 0)
   15521 			return;
   15522 		wm_write_smbus_addr(sc);
   15523 		sc->phy.release(sc);
   15524 	}
   15525 }
   15526 
   15527 /*
   15528  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15529  *  @sc: pointer to the HW structure
   15530  *
   15531  *  During Sx to S0 transitions on non-managed devices or managed devices
   15532  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15533  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15534  *  the PHY.
   15535  *  On i217, setup Intel Rapid Start Technology.
   15536  */
   15537 static int
   15538 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15539 {
   15540 	device_t dev = sc->sc_dev;
   15541 	int rv;
   15542 
   15543 	if (sc->sc_type < WM_T_PCH2)
   15544 		return 0;
   15545 
   15546 	rv = wm_init_phy_workarounds_pchlan(sc);
   15547 	if (rv != 0)
   15548 		return -1;
   15549 
   15550 	/* For i217 Intel Rapid Start Technology support when the system
   15551 	 * is transitioning from Sx and no manageability engine is present
   15552 	 * configure SMBus to restore on reset, disable proxy, and enable
   15553 	 * the reset on MTA (Multicast table array).
   15554 	 */
   15555 	if (sc->sc_phytype == WMPHY_I217) {
   15556 		uint16_t phy_reg;
   15557 
   15558 		if (sc->phy.acquire(sc) != 0)
   15559 			return -1;
   15560 
   15561 		/* Clear Auto Enable LPI after link up */
   15562 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15563 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15564 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15565 
   15566 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15567 			/* Restore clear on SMB if no manageability engine
   15568 			 * is present
   15569 			 */
   15570 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15571 			    &phy_reg);
   15572 			if (rv != 0)
   15573 				goto release;
   15574 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15575 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15576 
   15577 			/* Disable Proxy */
   15578 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15579 		}
   15580 		/* Enable reset on MTA */
    15581 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15582 		if (rv != 0)
   15583 			goto release;
   15584 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15585 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15586 
   15587 release:
   15588 		sc->phy.release(sc);
   15589 		return rv;
   15590 	}
   15591 
   15592 	return 0;
   15593 }
   15594 
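/*
 * Arm the device for wakeup: apply the suspend workarounds, program the
 * MAC (or PHY) wakeup registers, and set PME_EN in the PCI power
 * management capability when WOL is enabled.
 */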
   15595 static void
   15596 wm_enable_wakeup(struct wm_softc *sc)
   15597 {
   15598 	uint32_t reg, pmreg;
   15599 	pcireg_t pmode;
   15600 	int rv = 0;
   15601 
   15602 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15603 		device_xname(sc->sc_dev), __func__));
   15604 
   15605 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15606 	    &pmreg, NULL) == 0)
   15607 		return;
   15608 
   15609 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15610 		goto pme;
   15611 
   15612 	/* Advertise the wakeup capability */
   15613 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15614 	    | CTRL_SWDPIN(3));
   15615 
   15616 	/* Keep the laser running on fiber adapters */
   15617 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15618 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15619 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15620 		reg |= CTRL_EXT_SWDPIN(3);
   15621 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15622 	}
   15623 
   15624 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15625 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15626 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15627 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15628 		wm_suspend_workarounds_ich8lan(sc);
   15629 
   15630 #if 0	/* For the multicast packet */
   15631 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15632 	reg |= WUFC_MC;
   15633 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15634 #endif
   15635 
   15636 	if (sc->sc_type >= WM_T_PCH) {
   15637 		rv = wm_enable_phy_wakeup(sc);
   15638 		if (rv != 0)
   15639 			goto pme;
   15640 	} else {
   15641 		/* Enable wakeup by the MAC */
   15642 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15643 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15644 	}
   15645 
   15646 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15647 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15648 		|| (sc->sc_type == WM_T_PCH2))
   15649 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15650 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15651 
   15652 pme:
   15653 	/* Request PME */
   15654 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15655 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15656 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15657 		/* For WOL */
   15658 		pmode |= PCI_PMCSR_PME_EN;
   15659 	} else {
   15660 		/* Disable WOL */
   15661 		pmode &= ~PCI_PMCSR_PME_EN;
   15662 	}
   15663 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15664 }
   15665 
   15666 /* Disable ASPM L0s and/or L1 for workaround */
   15667 static void
   15668 wm_disable_aspm(struct wm_softc *sc)
   15669 {
   15670 	pcireg_t reg, mask = 0;
    15671 	const char *str = "";
   15672 
   15673 	/*
   15674 	 *  Only for PCIe device which has PCIe capability in the PCI config
   15675 	 * space.
   15676 	 */
   15677 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15678 		return;
   15679 
   15680 	switch (sc->sc_type) {
   15681 	case WM_T_82571:
   15682 	case WM_T_82572:
   15683 		/*
   15684 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15685 		 * State Power management L1 State (ASPM L1).
   15686 		 */
   15687 		mask = PCIE_LCSR_ASPM_L1;
   15688 		str = "L1 is";
   15689 		break;
   15690 	case WM_T_82573:
   15691 	case WM_T_82574:
   15692 	case WM_T_82583:
   15693 		/*
   15694 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15695 		 *
    15696 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15697 		 * some chipsets.  The 82574 and 82583 documents say that
    15698 		 * disabling L0s with those specific chipsets is sufficient,
    15699 		 * but we follow what the Intel em driver does.
   15700 		 *
   15701 		 * References:
   15702 		 * Errata 8 of the Specification Update of i82573.
   15703 		 * Errata 20 of the Specification Update of i82574.
   15704 		 * Errata 9 of the Specification Update of i82583.
   15705 		 */
   15706 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15707 		str = "L0s and L1 are";
   15708 		break;
   15709 	default:
   15710 		return;
   15711 	}
   15712 
   15713 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15714 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15715 	reg &= ~mask;
   15716 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15717 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15718 
   15719 	/* Print only in wm_attach() */
   15720 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15721 		aprint_verbose_dev(sc->sc_dev,
   15722 		    "ASPM %s disabled to workaround the errata.\n", str);
   15723 }
   15724 
   15725 /* LPLU */
   15726 
   15727 static void
   15728 wm_lplu_d0_disable(struct wm_softc *sc)
   15729 {
   15730 	struct mii_data *mii = &sc->sc_mii;
   15731 	uint32_t reg;
   15732 	uint16_t phyval;
   15733 
   15734 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15735 		device_xname(sc->sc_dev), __func__));
   15736 
   15737 	if (sc->sc_phytype == WMPHY_IFE)
   15738 		return;
   15739 
   15740 	switch (sc->sc_type) {
   15741 	case WM_T_82571:
   15742 	case WM_T_82572:
   15743 	case WM_T_82573:
   15744 	case WM_T_82575:
   15745 	case WM_T_82576:
   15746 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15747 		phyval &= ~PMR_D0_LPLU;
   15748 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15749 		break;
   15750 	case WM_T_82580:
   15751 	case WM_T_I350:
   15752 	case WM_T_I210:
   15753 	case WM_T_I211:
   15754 		reg = CSR_READ(sc, WMREG_PHPM);
   15755 		reg &= ~PHPM_D0A_LPLU;
   15756 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15757 		break;
   15758 	case WM_T_82574:
   15759 	case WM_T_82583:
   15760 	case WM_T_ICH8:
   15761 	case WM_T_ICH9:
   15762 	case WM_T_ICH10:
   15763 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15764 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15765 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15766 		CSR_WRITE_FLUSH(sc);
   15767 		break;
   15768 	case WM_T_PCH:
   15769 	case WM_T_PCH2:
   15770 	case WM_T_PCH_LPT:
   15771 	case WM_T_PCH_SPT:
   15772 	case WM_T_PCH_CNP:
   15773 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15774 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15775 		if (wm_phy_resetisblocked(sc) == false)
   15776 			phyval |= HV_OEM_BITS_ANEGNOW;
   15777 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15778 		break;
   15779 	default:
   15780 		break;
   15781 	}
   15782 }
   15783 
   15784 /* EEE */
   15785 
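/*
 * Enable or disable EEE on I350 class devices via the IPCNFG and EEER
 * registers, according to the ETHERCAP_EEE setting.
 */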
   15786 static int
   15787 wm_set_eee_i350(struct wm_softc *sc)
   15788 {
   15789 	struct ethercom *ec = &sc->sc_ethercom;
   15790 	uint32_t ipcnfg, eeer;
   15791 	uint32_t ipcnfg_mask
   15792 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15793 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15794 
   15795 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15796 
   15797 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15798 	eeer = CSR_READ(sc, WMREG_EEER);
   15799 
   15800 	/* Enable or disable per user setting */
   15801 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15802 		ipcnfg |= ipcnfg_mask;
   15803 		eeer |= eeer_mask;
   15804 	} else {
   15805 		ipcnfg &= ~ipcnfg_mask;
   15806 		eeer &= ~eeer_mask;
   15807 	}
   15808 
   15809 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15810 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15811 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15812 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15813 
   15814 	return 0;
   15815 }
   15816 
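/*
 * Enable or disable EEE on 82579/I217 PHYs. EEE is enabled for a given
 * speed only when both our advertisement and the link partner's ability
 * include that speed.
 */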
   15817 static int
   15818 wm_set_eee_pchlan(struct wm_softc *sc)
   15819 {
   15820 	device_t dev = sc->sc_dev;
   15821 	struct ethercom *ec = &sc->sc_ethercom;
   15822 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15823 	int rv = 0;
   15824 
   15825 	switch (sc->sc_phytype) {
   15826 	case WMPHY_82579:
   15827 		lpa = I82579_EEE_LP_ABILITY;
   15828 		pcs_status = I82579_EEE_PCS_STATUS;
   15829 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15830 		break;
   15831 	case WMPHY_I217:
   15832 		lpa = I217_EEE_LP_ABILITY;
   15833 		pcs_status = I217_EEE_PCS_STATUS;
   15834 		adv_addr = I217_EEE_ADVERTISEMENT;
   15835 		break;
   15836 	default:
   15837 		return 0;
   15838 	}
   15839 
   15840 	if (sc->phy.acquire(sc)) {
   15841 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15842 		return 0;
   15843 	}
   15844 
   15845 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15846 	if (rv != 0)
   15847 		goto release;
   15848 
   15849 	/* Clear bits that enable EEE in various speeds */
   15850 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15851 
   15852 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15853 		/* Save off link partner's EEE ability */
   15854 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15855 		if (rv != 0)
   15856 			goto release;
   15857 
   15858 		/* Read EEE advertisement */
   15859 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15860 			goto release;
   15861 
   15862 		/*
   15863 		 * Enable EEE only for speeds in which the link partner is
   15864 		 * EEE capable and for which we advertise EEE.
   15865 		 */
   15866 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15867 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15868 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15869 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15870 			if ((data & ANLPAR_TX_FD) != 0)
   15871 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15872 			else {
   15873 				/*
   15874 				 * EEE is not supported in 100Half, so ignore
   15875 				 * partner's EEE in 100 ability if full-duplex
   15876 				 * is not advertised.
   15877 				 */
   15878 				sc->eee_lp_ability
   15879 				    &= ~AN_EEEADVERT_100_TX;
   15880 			}
   15881 		}
   15882 	}
   15883 
   15884 	if (sc->sc_phytype == WMPHY_82579) {
   15885 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15886 		if (rv != 0)
   15887 			goto release;
   15888 
   15889 		data &= ~I82579_LPI_PLL_SHUT_100;
   15890 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15891 	}
   15892 
   15893 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15894 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15895 		goto release;
   15896 
   15897 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15898 release:
   15899 	sc->phy.release(sc);
   15900 
   15901 	return rv;
   15902 }
   15903 
   15904 static int
   15905 wm_set_eee(struct wm_softc *sc)
   15906 {
   15907 	struct ethercom *ec = &sc->sc_ethercom;
   15908 
   15909 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15910 		return 0;
   15911 
   15912 	if (sc->sc_type == WM_T_I354) {
   15913 		/* I354 uses an external PHY */
   15914 		return 0; /* not yet */
   15915 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15916 		return wm_set_eee_i350(sc);
   15917 	else if (sc->sc_type >= WM_T_PCH2)
   15918 		return wm_set_eee_pchlan(sc);
   15919 
   15920 	return 0;
   15921 }
   15922 
   15923 /*
   15924  * Workarounds (mainly PHY related).
   15925  * Basically, PHY's workarounds are in the PHY drivers.
   15926  */
   15927 
   15928 /* Work-around for 82566 Kumeran PCS lock loss */
   15929 static int
   15930 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15931 {
   15932 	struct mii_data *mii = &sc->sc_mii;
   15933 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15934 	int i, reg, rv;
   15935 	uint16_t phyreg;
   15936 
   15937 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15938 		device_xname(sc->sc_dev), __func__));
   15939 
   15940 	/* If the link is not up, do nothing */
   15941 	if ((status & STATUS_LU) == 0)
   15942 		return 0;
   15943 
   15944 	/* Nothing to do if the link is other than 1Gbps */
   15945 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15946 		return 0;
   15947 
   15948 	for (i = 0; i < 10; i++) {
   15949 		/* read twice */
   15950 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15951 		if (rv != 0)
   15952 			return rv;
   15953 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15954 		if (rv != 0)
   15955 			return rv;
   15956 
   15957 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15958 			goto out;	/* GOOD! */
   15959 
   15960 		/* Reset the PHY */
   15961 		wm_reset_phy(sc);
   15962 		delay(5*1000);
   15963 	}
   15964 
   15965 	/* Disable GigE link negotiation */
   15966 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15967 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15968 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15969 
   15970 	/*
   15971 	 * Call gig speed drop workaround on Gig disable before accessing
   15972 	 * any PHY registers.
   15973 	 */
   15974 	wm_gig_downshift_workaround_ich8lan(sc);
   15975 
   15976 out:
   15977 	return 0;
   15978 }
   15979 
   15980 /*
   15981  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15982  *  @sc: pointer to the HW structure
   15983  *
   15984  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
   15985  *  LPLU, Gig disable, MDIC PHY reset):
   15986  *    1) Set Kumeran Near-end loopback
   15987  *    2) Clear Kumeran Near-end loopback
   15988  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15989  */
   15990 static void
   15991 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15992 {
   15993 	uint16_t kmreg;
   15994 
   15995 	/* Only for igp3 */
   15996 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15997 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15998 			return;
   15999 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16000 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16001 			return;
   16002 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16003 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16004 	}
   16005 }
   16006 
   16007 /*
   16008  * Workaround for pch's PHYs
   16009  * XXX should be moved to new PHY driver?
   16010  */
   16011 static int
   16012 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16013 {
   16014 	device_t dev = sc->sc_dev;
   16015 	struct mii_data *mii = &sc->sc_mii;
   16016 	struct mii_softc *child;
   16017 	uint16_t phy_data, phyrev = 0;
   16018 	int phytype = sc->sc_phytype;
   16019 	int rv;
   16020 
   16021 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16022 		device_xname(dev), __func__));
   16023 	KASSERT(sc->sc_type == WM_T_PCH);
   16024 
   16025 	/* Set MDIO slow mode before any other MDIO access */
   16026 	if (phytype == WMPHY_82577)
   16027 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16028 			return rv;
   16029 
   16030 	child = LIST_FIRST(&mii->mii_phys);
   16031 	if (child != NULL)
   16032 		phyrev = child->mii_mpd_rev;
   16033 
    16034 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   16035 	if ((child != NULL) &&
   16036 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16037 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16038 		/* Disable generation of early preamble (0x4431) */
   16039 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16040 		    &phy_data);
   16041 		if (rv != 0)
   16042 			return rv;
   16043 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16044 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16045 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16046 		    phy_data);
   16047 		if (rv != 0)
   16048 			return rv;
   16049 
   16050 		/* Preamble tuning for SSC */
   16051 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16052 		if (rv != 0)
   16053 			return rv;
   16054 	}
   16055 
   16056 	/* 82578 */
   16057 	if (phytype == WMPHY_82578) {
   16058 		/*
   16059 		 * Return registers to default by doing a soft reset then
   16060 		 * writing 0x3140 to the control register
   16061 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16062 		 */
   16063 		if ((child != NULL) && (phyrev < 2)) {
   16064 			PHY_RESET(child);
   16065 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16066 			if (rv != 0)
   16067 				return rv;
   16068 		}
   16069 	}
   16070 
   16071 	/* Select page 0 */
   16072 	if ((rv = sc->phy.acquire(sc)) != 0)
   16073 		return rv;
   16074 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16075 	sc->phy.release(sc);
   16076 	if (rv != 0)
   16077 		return rv;
   16078 
   16079 	/*
   16080 	 * Configure the K1 Si workaround during phy reset assuming there is
   16081 	 * link so that it disables K1 if link is in 1Gbps.
   16082 	 */
   16083 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16084 		return rv;
   16085 
   16086 	/* Workaround for link disconnects on a busy hub in half duplex */
   16087 	rv = sc->phy.acquire(sc);
   16088 	if (rv)
   16089 		return rv;
   16090 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16091 	if (rv)
   16092 		goto release;
   16093 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16094 	    phy_data & 0x00ff);
   16095 	if (rv)
   16096 		goto release;
   16097 
   16098 	/* Set MSE higher to enable link to stay up when noise is high */
   16099 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16100 release:
   16101 	sc->phy.release(sc);
   16102 
   16103 	return rv;
   16104 }
   16105 
   16106 /*
   16107  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16108  *  @sc:   pointer to the HW structure
   16109  */
   16110 static void
   16111 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16112 {
   16113 
   16114 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16115 		device_xname(sc->sc_dev), __func__));
   16116 
   16117 	if (sc->phy.acquire(sc) != 0)
   16118 		return;
   16119 
   16120 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16121 
   16122 	sc->phy.release(sc);
   16123 }
   16124 
   16125 static void
   16126 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16127 {
   16128 	device_t dev = sc->sc_dev;
   16129 	uint32_t mac_reg;
   16130 	uint16_t i, wuce;
   16131 	int count;
   16132 
   16133 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16134 		device_xname(dev), __func__));
   16135 
   16136 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16137 		return;
   16138 
   16139 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16140 	count = wm_rar_count(sc);
   16141 	for (i = 0; i < count; i++) {
   16142 		uint16_t lo, hi;
   16143 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16144 		lo = (uint16_t)(mac_reg & 0xffff);
   16145 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16146 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16147 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16148 
   16149 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16150 		lo = (uint16_t)(mac_reg & 0xffff);
   16151 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16152 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16153 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16154 	}
   16155 
   16156 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16157 }
   16158 
   16159 /*
   16160  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16161  *  with 82579 PHY
   16162  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16163  */
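/*
 * Rough shape of the sequence below, as read from the code itself:
 *   1. Disable the PHY Rx path (I82579_DFT_CTRL bit 14).
 *   2. If enabling, seed the receive-address CRC registers in the MAC
 *      (PCH_RAICC) and mirror the Rx addresses into the PHY.
 *   3. Adjust FFLT_DBG, RCTL (CRC stripping) and the KMRN/PHY registers
 *      for jumbo operation, or restore their defaults.
 *   4. Re-enable the PHY Rx path.
 */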
   16164 static int
   16165 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16166 {
   16167 	device_t dev = sc->sc_dev;
   16168 	int rar_count;
   16169 	int rv;
   16170 	uint32_t mac_reg;
   16171 	uint16_t dft_ctrl, data;
   16172 	uint16_t i;
   16173 
   16174 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16175 		device_xname(dev), __func__));
   16176 
   16177 	if (sc->sc_type < WM_T_PCH2)
   16178 		return 0;
   16179 
   16180 	/* Acquire PHY semaphore */
   16181 	rv = sc->phy.acquire(sc);
   16182 	if (rv != 0)
   16183 		return rv;
   16184 
   16185 	/* Disable Rx path while enabling/disabling workaround */
   16186 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16187 	if (rv != 0)
   16188 		goto out;
   16189 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16190 	    dft_ctrl | (1 << 14));
   16191 	if (rv != 0)
   16192 		goto out;
   16193 
   16194 	if (enable) {
   16195 		/* Write Rx addresses (rar_entry_count for RAL/H, and
   16196 		 * SHRAL/H) and initial CRC values to the MAC
   16197 		 */
   16198 		rar_count = wm_rar_count(sc);
   16199 		for (i = 0; i < rar_count; i++) {
   16200 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16201 			uint32_t addr_high, addr_low;
   16202 
   16203 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16204 			if (!(addr_high & RAL_AV))
   16205 				continue;
   16206 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16207 			mac_addr[0] = (addr_low & 0xFF);
   16208 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16209 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16210 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16211 			mac_addr[4] = (addr_high & 0xFF);
   16212 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16213 
   16214 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16215 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16216 		}
   16217 
   16218 		/* Write Rx addresses to the PHY */
   16219 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16220 	}
   16221 
   16222 	/*
   16223 	 * If enable ==
   16224 	 *	true: Enable jumbo frame workaround in the MAC.
   16225 	 *	false: Write MAC register values back to h/w defaults.
   16226 	 */
   16227 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16228 	if (enable) {
   16229 		mac_reg &= ~(1 << 14);
   16230 		mac_reg |= (7 << 15);
   16231 	} else
   16232 		mac_reg &= ~(0xf << 14);
   16233 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16234 
   16235 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16236 	if (enable) {
   16237 		mac_reg |= RCTL_SECRC;
   16238 		sc->sc_rctl |= RCTL_SECRC;
   16239 		sc->sc_flags |= WM_F_CRC_STRIP;
   16240 	} else {
   16241 		mac_reg &= ~RCTL_SECRC;
   16242 		sc->sc_rctl &= ~RCTL_SECRC;
   16243 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16244 	}
   16245 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16246 
   16247 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16248 	if (rv != 0)
   16249 		goto out;
   16250 	if (enable)
   16251 		data |= 1 << 0;
   16252 	else
   16253 		data &= ~(1 << 0);
   16254 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16255 	if (rv != 0)
   16256 		goto out;
   16257 
   16258 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16259 	if (rv != 0)
   16260 		goto out;
   16261 	/*
   16262 	 * XXX FreeBSD and Linux do the same thing: they set the same value
   16263 	 * in both the enable case and the disable case. Is that correct?
   16264 	 */
   16265 	data &= ~(0xf << 8);
   16266 	data |= (0xb << 8);
   16267 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16268 	if (rv != 0)
   16269 		goto out;
   16270 
   16271 	/*
   16272 	 * If enable ==
   16273 	 *	true: Enable jumbo frame workaround in the PHY.
   16274 	 *	false: Write PHY register values back to h/w defaults.
   16275 	 */
   16276 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16277 	if (rv != 0)
   16278 		goto out;
   16279 	data &= ~(0x7F << 5);
   16280 	if (enable)
   16281 		data |= (0x37 << 5);
   16282 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16283 	if (rv != 0)
   16284 		goto out;
   16285 
   16286 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16287 	if (rv != 0)
   16288 		goto out;
   16289 	if (enable)
   16290 		data &= ~(1 << 13);
   16291 	else
   16292 		data |= (1 << 13);
   16293 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16294 	if (rv != 0)
   16295 		goto out;
   16296 
   16297 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16298 	if (rv != 0)
   16299 		goto out;
   16300 	data &= ~(0x3FF << 2);
   16301 	if (enable)
   16302 		data |= (I82579_TX_PTR_GAP << 2);
   16303 	else
   16304 		data |= (0x8 << 2);
   16305 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16306 	if (rv != 0)
   16307 		goto out;
   16308 
   16309 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16310 	    enable ? 0xf100 : 0x7e00);
   16311 	if (rv != 0)
   16312 		goto out;
   16313 
   16314 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16315 	if (rv != 0)
   16316 		goto out;
   16317 	if (enable)
   16318 		data |= 1 << 10;
   16319 	else
   16320 		data &= ~(1 << 10);
   16321 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16322 	if (rv != 0)
   16323 		goto out;
   16324 
   16325 	/* Re-enable Rx path after enabling/disabling workaround */
   16326 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16327 	    dft_ctrl & ~(1 << 14));
   16328 
   16329 out:
   16330 	sc->phy.release(sc);
   16331 
   16332 	return rv;
   16333 }
   16334 
   16335 /*
   16336  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16337  *  done after every PHY reset.
   16338  */
   16339 static int
   16340 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16341 {
   16342 	device_t dev = sc->sc_dev;
   16343 	int rv;
   16344 
   16345 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16346 		device_xname(dev), __func__));
   16347 	KASSERT(sc->sc_type == WM_T_PCH2);
   16348 
   16349 	/* Set MDIO slow mode before any other MDIO access */
   16350 	rv = wm_set_mdio_slow_mode_hv(sc);
   16351 	if (rv != 0)
   16352 		return rv;
   16353 
   16354 	rv = sc->phy.acquire(sc);
   16355 	if (rv != 0)
   16356 		return rv;
   16357 	/* Raise the MSE threshold so the link stays up when noise is high */
   16358 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16359 	if (rv != 0)
   16360 		goto release;
   16361 	/* Drop the link after the MSE threshold has been reached 5 times */
   16362 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16363 release:
   16364 	sc->phy.release(sc);
   16365 
   16366 	return rv;
   16367 }
   16368 
   16369 /**
   16370  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16371  *  @link: link up bool flag
   16372  *
   16373  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   16374  *  preventing further DMA write requests.  Work around the issue by
   16375  *  disabling the de-assertion of the clock request when in 1Gbps mode.
   16376  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16377  *  speeds in order to avoid Tx hangs.
   16378  **/
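/*
 * Sketch of the two cases below: with link up at 1Gbps, K1 is turned off
 * via KMRN and FEXTNVM6_REQ_PLL_CLK keeps the PLL clock requested; at
 * 10/100 (and PHY rev <= 5), the inband link-status Tx timeout and the
 * K1 entry latency are tuned per speed instead.
 */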
   16379 static int
   16380 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16381 {
   16382 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16383 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16384 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16385 	uint16_t phyreg;
   16386 
   16387 	if (link && (speed == STATUS_SPEED_1000)) {
   16388 		int rv = sc->phy.acquire(sc);
         		if (rv != 0)
         			return rv;
   16389 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16390 		    &phyreg);
   16391 		if (rv != 0)
   16392 			goto release;
   16393 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16394 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16395 		if (rv != 0)
   16396 			goto release;
   16397 		delay(20);
   16398 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16399 
         		/* Restore K1 to its previously saved configuration */
   16400 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16401 		    phyreg);
   16402 release:
   16403 		sc->phy.release(sc);
   16404 		return rv;
   16405 	}
   16406 
   16407 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16408 
   16409 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16410 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16411 	    || !link
   16412 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16413 		goto update_fextnvm6;
   16414 
   16415 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16416 
   16417 	/* Clear link status transmit timeout */
   16418 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16419 	if (speed == STATUS_SPEED_100) {
   16420 		/* Set inband Tx timeout to 5x10us for 100Half */
   16421 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16422 
   16423 		/* Do not extend the K1 entry latency for 100Half */
   16424 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16425 	} else {
   16426 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16427 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16428 
   16429 		/* Extend the K1 entry latency for 10 Mbps */
   16430 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16431 	}
   16432 
   16433 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16434 
   16435 update_fextnvm6:
   16436 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16437 	return 0;
   16438 }
   16439 
   16440 /*
   16441  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16442  *  @sc:   pointer to the HW structure
   16443  *  @link: link up bool flag
   16444  *
   16445  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   16446  *  from a lower speed.  This workaround disables K1 whenever link is at
   16447  *  1Gbps.  If link is down, the function restores the default K1 setting
   16448  *  in the NVM.
   16449  */
   16450 static int
   16451 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16452 {
   16453 	int k1_enable = sc->sc_nvm_k1_enabled;
   16454 
   16455 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16456 		device_xname(sc->sc_dev), __func__));
   16457 
   16458 	if (sc->phy.acquire(sc) != 0)
   16459 		return -1;
   16460 
   16461 	if (link) {
   16462 		k1_enable = 0;
   16463 
   16464 		/* Link stall fix for link up */
   16465 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16466 		    0x0100);
   16467 	} else {
   16468 		/* Link stall fix for link down */
   16469 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16470 		    0x4100);
   16471 	}
   16472 
   16473 	wm_configure_k1_ich8lan(sc, k1_enable);
   16474 	sc->phy.release(sc);
   16475 
   16476 	return 0;
   16477 }
   16478 
   16479 /*
   16480  *  wm_k1_workaround_lv - K1 Si workaround
   16481  *  @sc:   pointer to the HW structure
   16482  *
   16483  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
   16484  *  Disable K1 for 1000 and 100 link speeds.
   16485  */
   16486 static int
   16487 wm_k1_workaround_lv(struct wm_softc *sc)
   16488 {
   16489 	uint32_t reg;
   16490 	uint16_t phyreg;
   16491 	int rv;
   16492 
   16493 	if (sc->sc_type != WM_T_PCH2)
   16494 		return 0;
   16495 
   16496 	/* Set K1 beacon duration based on 10Mbps speed */
   16497 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16498 	if (rv != 0)
   16499 		return rv;
   16500 
   16501 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16502 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16503 		if (phyreg &
   16504 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   16505 			/* LV 1G/100 packet drop issue workaround */
   16506 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16507 			    &phyreg);
   16508 			if (rv != 0)
   16509 				return rv;
   16510 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16511 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16512 			    phyreg);
   16513 			if (rv != 0)
   16514 				return rv;
   16515 		} else {
   16516 			/* For 10Mbps */
   16517 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16518 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16519 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16520 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16521 		}
   16522 	}
   16523 
   16524 	return 0;
   16525 }
   16526 
   16527 /*
   16528  *  wm_link_stall_workaround_hv - Si workaround
   16529  *  @sc: pointer to the HW structure
   16530  *
   16531  *  This function works around a Si bug where the link partner can get
   16532  *  a link up indication before the PHY does. If small packets are sent
   16533  *  by the link partner they can be placed in the packet buffer without
   16534  *  being properly accounted for by the PHY, which stalls and prevents
   16535  *  further packets from being received.  The workaround is to clear the
   16536  *  packet buffer after the PHY detects link up.
   16537  */
   16538 static int
   16539 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16540 {
   16541 	uint16_t phyreg;
   16542 
   16543 	if (sc->sc_phytype != WMPHY_82578)
   16544 		return 0;
   16545 
   16546 	/* Don't apply the workaround if the PHY is in loopback (bit 14 set) */
   16547 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16548 	if ((phyreg & BMCR_LOOP) != 0)
   16549 		return 0;
   16550 
   16551 	/* Check if link is up and at 1Gbps */
   16552 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16553 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16554 	    | BM_CS_STATUS_SPEED_MASK;
   16555 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16556 		| BM_CS_STATUS_SPEED_1000))
   16557 		return 0;
   16558 
   16559 	delay(200 * 1000);	/* XXX too big */
   16560 
   16561 	/* Flush the packets in the fifo buffer */
   16562 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16563 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16564 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16565 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16566 
   16567 	return 0;
   16568 }
   16569 
   16570 static int
   16571 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16572 {
   16573 	int rv;
   16574 	uint16_t reg;
   16575 
   16576 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16577 	if (rv != 0)
   16578 		return rv;
   16579 
   16580 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16581 	    reg | HV_KMRN_MDIO_SLOW);
   16582 }
   16583 
   16584 /*
   16585  *  wm_configure_k1_ich8lan - Configure K1 power state
   16586  *  @sc: pointer to the HW structure
   16587  *  @enable: K1 state to configure
   16588  *
   16589  *  Configure the K1 power state based on the provided parameter.
   16590  *  Assumes semaphore already acquired.
   16591  */
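/*
 * An inference from the code below rather than from a datasheet: the link
 * speed is briefly forced (CTRL_FRCSPD plus CTRL_EXT_SPD_BYPS) and then
 * restored, apparently so that the new K1 setting takes effect in the MAC.
 */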
   16592 static void
   16593 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16594 {
   16595 	uint32_t ctrl, ctrl_ext, tmp;
   16596 	uint16_t kmreg;
   16597 	int rv;
   16598 
   16599 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16600 
   16601 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16602 	if (rv != 0)
   16603 		return;
   16604 
   16605 	if (k1_enable)
   16606 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16607 	else
   16608 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16609 
   16610 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16611 	if (rv != 0)
   16612 		return;
   16613 
   16614 	delay(20);
   16615 
   16616 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16617 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16618 
   16619 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16620 	tmp |= CTRL_FRCSPD;
   16621 
   16622 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16623 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16624 	CSR_WRITE_FLUSH(sc);
   16625 	delay(20);
   16626 
   16627 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16628 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16629 	CSR_WRITE_FLUSH(sc);
   16630 	delay(20);
   16631 
   16632 	return;
   16633 }
   16634 
   16635 /* Special case: the 82575 needs manual init ... */
   16636 static void
   16637 wm_reset_init_script_82575(struct wm_softc *sc)
   16638 {
   16639 	/*
   16640 	 * Remark: this is untested code - we have no board without EEPROM;
   16641 	 * same setup as mentioned in the FreeBSD driver for the i82575.
   16642 	 */
   16643 
   16644 	/* SerDes configuration via SERDESCTRL */
   16645 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16646 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16647 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16648 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16649 
   16650 	/* CCM configuration via CCMCTL register */
   16651 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16652 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16653 
   16654 	/* PCIe lanes configuration */
   16655 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16656 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16657 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16658 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16659 
   16660 	/* PCIe PLL Configuration */
   16661 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16662 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16663 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16664 }
   16665 
   16666 static void
   16667 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16668 {
   16669 	uint32_t reg;
   16670 	uint16_t nvmword;
   16671 	int rv;
   16672 
   16673 	if (sc->sc_type != WM_T_82580)
   16674 		return;
   16675 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16676 		return;
   16677 
   16678 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16679 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16680 	if (rv != 0) {
   16681 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16682 		    __func__);
   16683 		return;
   16684 	}
   16685 
   16686 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16687 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16688 		reg |= MDICNFG_DEST;
   16689 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16690 		reg |= MDICNFG_COM_MDIO;
   16691 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16692 }
   16693 
   16694 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16695 
   16696 static bool
   16697 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16698 {
   16699 	uint32_t reg;
   16700 	uint16_t id1, id2;
   16701 	int i, rv;
   16702 
   16703 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16704 		device_xname(sc->sc_dev), __func__));
   16705 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16706 
   16707 	id1 = id2 = 0xffff;
   16708 	for (i = 0; i < 2; i++) {
   16709 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16710 		    &id1);
   16711 		if ((rv != 0) || MII_INVALIDID(id1))
   16712 			continue;
   16713 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16714 		    &id2);
   16715 		if ((rv != 0) || MII_INVALIDID(id2))
   16716 			continue;
   16717 		break;
   16718 	}
   16719 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16720 		goto out;
   16721 
   16722 	/*
   16723 	 * In case the PHY needs to be in mdio slow mode,
   16724 	 * set slow mode and try to get the PHY id again.
   16725 	 */
   16726 	rv = 0;
   16727 	if (sc->sc_type < WM_T_PCH_LPT) {
   16728 		sc->phy.release(sc);
   16729 		wm_set_mdio_slow_mode_hv(sc);
   16730 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16731 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16732 		sc->phy.acquire(sc);
   16733 	}
   16734 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16735 		device_printf(sc->sc_dev, "XXX return with false\n");
   16736 		return false;
   16737 	}
   16738 out:
   16739 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16740 		/* Only unforce SMBus if ME is not active */
   16741 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16742 			uint16_t phyreg;
   16743 
   16744 			/* Unforce SMBus mode in PHY */
   16745 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16746 			    CV_SMB_CTRL, &phyreg);
   16747 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16748 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16749 			    CV_SMB_CTRL, phyreg);
   16750 
   16751 			/* Unforce SMBus mode in MAC */
   16752 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16753 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16754 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16755 		}
   16756 	}
   16757 	return true;
   16758 }
   16759 
   16760 static void
   16761 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16762 {
   16763 	uint32_t reg;
   16764 	int i;
   16765 
   16766 	/* Set PHY Config Counter to 50msec */
   16767 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16768 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16769 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16770 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16771 
   16772 	/* Toggle LANPHYPC */
   16773 	reg = CSR_READ(sc, WMREG_CTRL);
   16774 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16775 	reg &= ~CTRL_LANPHYPC_VALUE;
   16776 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16777 	CSR_WRITE_FLUSH(sc);
   16778 	delay(1000);
   16779 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16780 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16781 	CSR_WRITE_FLUSH(sc);
   16782 
   16783 	if (sc->sc_type < WM_T_PCH_LPT)
   16784 		delay(50 * 1000);
   16785 	else {
   16786 		i = 20;
   16787 
   16788 		do {
   16789 			delay(5 * 1000);
   16790 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16791 		    && i--);
   16792 
   16793 		delay(30 * 1000);
   16794 	}
   16795 }
   16796 
   16797 static int
   16798 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16799 {
   16800 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16801 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
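	/* The bool "link" is used as a 0/1 value to set the REQ fields. */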
   16802 	uint32_t rxa;
   16803 	uint16_t scale = 0, lat_enc = 0;
   16804 	int32_t obff_hwm = 0;
   16805 	int64_t lat_ns, value;
   16806 
   16807 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16808 		device_xname(sc->sc_dev), __func__));
   16809 
   16810 	if (link) {
   16811 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16812 		uint32_t status;
   16813 		uint16_t speed;
   16814 		pcireg_t preg;
   16815 
   16816 		status = CSR_READ(sc, WMREG_STATUS);
   16817 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16818 		case STATUS_SPEED_10:
   16819 			speed = 10;
   16820 			break;
   16821 		case STATUS_SPEED_100:
   16822 			speed = 100;
   16823 			break;
   16824 		case STATUS_SPEED_1000:
   16825 			speed = 1000;
   16826 			break;
   16827 		default:
   16828 			device_printf(sc->sc_dev, "Unknown speed "
   16829 			    "(status = %08x)\n", status);
   16830 			return -1;
   16831 		}
   16832 
   16833 		/* Rx Packet Buffer Allocation size (KB) */
   16834 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16835 
   16836 		/*
   16837 		 * Determine the maximum latency tolerated by the device.
   16838 		 *
   16839 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16840 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16841 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16842 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16843 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16844 		 */
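		/*
		 * The computation below first estimates the time (in ns) to
		 * drain the Rx packet buffer: rxa is in KB, so rxa * 1024
		 * gives bytes, two worst-case frames are subtracted, and
		 * * 8 * 1000 / speed(Mbps) converts bits to ns.  Worked
		 * encoding example: ~100,000 ns yields scale 2 (2^10 ns
		 * units) and value 98, since ceil(ceil(100000/32)/32) == 98
		 * and 98 * 1024 = 100,352 ns; the loop below computes
		 * exactly that by repeated divide-by-32 rounding up.
		 */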
   16845 		lat_ns = ((int64_t)rxa * 1024 -
   16846 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16847 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16848 		if (lat_ns < 0)
   16849 			lat_ns = 0;
   16850 		else
   16851 			lat_ns /= speed;
   16852 		value = lat_ns;
   16853 
   16854 		while (value > LTRV_VALUE) {
   16855 			scale++;
   16856 			value = howmany(value, __BIT(5));
   16857 		}
   16858 		if (scale > LTRV_SCALE_MAX) {
   16859 			device_printf(sc->sc_dev,
   16860 			    "Invalid LTR latency scale %d\n", scale);
   16861 			return -1;
   16862 		}
   16863 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16864 
   16865 		/* Determine the maximum latency tolerated by the platform */
   16866 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16867 		    WM_PCI_LTR_CAP_LPT);
   16868 		max_snoop = preg & 0xffff;
   16869 		max_nosnoop = preg >> 16;
   16870 
   16871 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16872 
   16873 		if (lat_enc > max_ltr_enc) {
   16874 			lat_enc = max_ltr_enc;
   16875 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16876 			    * PCI_LTR_SCALETONS(
   16877 				    __SHIFTOUT(lat_enc,
   16878 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16879 		}
   16880 
   16881 		if (lat_ns) {
   16882 			lat_ns *= speed * 1000;
   16883 			lat_ns /= 8;
   16884 			lat_ns /= 1000000000;
   16885 			obff_hwm = (int32_t)(rxa - lat_ns);
   16886 		}
   16887 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   16888 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   16889 			    "(rxa = %d, lat_ns = %d)\n",
   16890 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16891 			return -1;
   16892 		}
   16893 	}
   16894 	/* Snoop and No-Snoop latencies are set to the same value */
   16895 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16896 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16897 
   16898 	/* Set OBFF high water mark */
   16899 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16900 	reg |= obff_hwm;
   16901 	CSR_WRITE(sc, WMREG_SVT, reg);
   16902 
   16903 	/* Enable OBFF */
   16904 	reg = CSR_READ(sc, WMREG_SVCR);
   16905 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16906 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16907 
   16908 	return 0;
   16909 }
   16910 
   16911 /*
   16912  * I210 Errata 25 and I211 Errata 10
   16913  * Slow System Clock.
   16914  *
   16915  * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
   16916  */
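/*
 * Outline of the workaround loop below, as read from the code itself:
 * read the PHY PLL frequency register; while it still reads back as
 * unconfigured, reset the internal PHY, rewrite EEARBC_I210 with the
 * OR'ed autoload word, and bounce the function through D3/D0 so the
 * word is reloaded, retrying up to WM_MAX_PLL_TRIES times.
 */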
   16917 static int
   16918 wm_pll_workaround_i210(struct wm_softc *sc)
   16919 {
   16920 	uint32_t mdicnfg, wuc;
   16921 	uint32_t reg;
   16922 	pcireg_t pcireg;
   16923 	uint32_t pmreg;
   16924 	uint16_t nvmword, tmp_nvmword;
   16925 	uint16_t phyval;
   16926 	bool wa_done = false;
   16927 	int i, rv = 0;
   16928 
   16929 	/* Get Power Management cap offset */
   16930 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16931 	    &pmreg, NULL) == 0)
   16932 		return -1;
   16933 
   16934 	/* Save WUC and MDICNFG registers */
   16935 	wuc = CSR_READ(sc, WMREG_WUC);
   16936 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16937 
   16938 	reg = mdicnfg & ~MDICNFG_DEST;
   16939 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16940 
   16941 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   16942 		/*
   16943 		 * The default value of the Initialization Control Word 1
   16944 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   16945 		 */
   16946 		nvmword = INVM_DEFAULT_AL;
   16947 	}
   16948 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16949 
   16950 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16951 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16952 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16953 
   16954 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16955 			rv = 0;
   16956 			break; /* OK */
   16957 		} else
   16958 			rv = -1;
   16959 
   16960 		wa_done = true;
   16961 		/* Directly reset the internal PHY */
   16962 		reg = CSR_READ(sc, WMREG_CTRL);
   16963 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16964 
   16965 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16966 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16967 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16968 
   16969 		CSR_WRITE(sc, WMREG_WUC, 0);
   16970 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16971 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16972 
   16973 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16974 		    pmreg + PCI_PMCSR);
   16975 		pcireg |= PCI_PMCSR_STATE_D3;
   16976 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16977 		    pmreg + PCI_PMCSR, pcireg);
   16978 		delay(1000);
   16979 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16980 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16981 		    pmreg + PCI_PMCSR, pcireg);
   16982 
   16983 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16984 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16985 
   16986 		/* Restore WUC register */
   16987 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16988 	}
   16989 
   16990 	/* Restore MDICNFG setting */
   16991 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16992 	if (wa_done)
   16993 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16994 	return rv;
   16995 }
   16996 
   16997 static void
   16998 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16999 {
   17000 	uint32_t reg;
   17001 
   17002 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17003 		device_xname(sc->sc_dev), __func__));
   17004 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17005 	    || (sc->sc_type == WM_T_PCH_CNP));
   17006 
   17007 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17008 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17009 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17010 
   17011 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17012 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17013 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17014 }
   17015 
   17016 /* Sysctl function */
   17017 #ifdef WM_DEBUG
   17018 static int
   17019 wm_sysctl_debug(SYSCTLFN_ARGS)
   17020 {
   17021 	struct sysctlnode node = *rnode;
   17022 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17023 	uint32_t dflags;
   17024 	int error;
   17025 
   17026 	dflags = sc->sc_debug;
   17027 	node.sysctl_data = &dflags;
   17028 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17029 
   17030 	if (error || newp == NULL)
   17031 		return error;
   17032 
   17033 	sc->sc_debug = dflags;
   17034 
   17035 	return 0;
   17036 }
   17037 #endif
   17038