/*	$NetBSD: if_wm.c,v 1.722 2021/12/11 17:05:50 skrll Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.722 2021/12/11 17:05:50 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
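
/*
 * Usage sketch for DPRINTF (illustrative only, kept disabled): the last
 * argument must carry its own parentheses, since the macro pastes it
 * directly after printf.
 */
#if 0
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
#endif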

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts that this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
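
/*
 * Illustrative sketch (kept disabled): since the ring sizes above are
 * powers of two, WM_NEXTTX() wraps an index by masking instead of a
 * modulo, e.g. with 4096 descriptors, (4095 + 1) & 4095 == 0.
 */
#if 0
	int idx = WM_NTXDESC(txq) - 1;	/* last descriptor slot */
	idx = WM_NEXTTX(txq, idx);	/* wraps back to slot 0 */
#endif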

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
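
/*
 * Illustrative sketch (kept disabled): WM_PREVRX() relies on the same
 * power-of-two masking, so stepping back from slot 0 wraps to the last
 * descriptor instead of going negative.
 */
#if 0
	int prev = WM_PREVRX(0);	/* (0 - 1) & 255 == 255 */
#endif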

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
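
/*
 * Expansion sketch (illustrative only, kept disabled):
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares roughly the pair of members
 * below.  Note that the macro parameters are not expanded inside the
 * string literal, so the name buffer is sized by the fixed string
 * "qname##XX##evname"; the attach macro later fills it with e.g.
 * "txq00txdw" via snprintf.
 */
#if 0
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_txdw;
#endif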

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
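
/*
 * Usage sketch (illustrative only, kept disabled): callers bracket
 * softc state changes with the core lock; the null check keeps these
 * macros safe to use before the mutex has been created.
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify softc state ... */
	WM_CORE_UNLOCK(sc);
#endif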

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
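
/*
 * Usage sketch (illustrative only, kept disabled): a multi-buffer
 * packet is collected by linking each received mbuf onto the tail of
 * the chain, then the chain is reset once the completed packet has
 * been taken off rxq_head.
 */
#if 0
	WM_RXCHAIN_LINK(rxq, m);	/* append m, advance rxq_tailp */
	/* ... when the last buffer of the packet has arrived ... */
	m = rxq->rxq_head;
	WM_RXCHAIN_RESET(rxq);		/* start a new, empty chain */
#endif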

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
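
/*
 * Usage sketch (illustrative only, kept disabled): posted PCI writes
 * are forced out by reading a register back, which is what
 * CSR_WRITE_FLUSH() does with WMREG_STATUS after control registers
 * have been touched.
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);
#endif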

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
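
/*
 * Illustrative sketch (kept disabled): a descriptor base address is
 * split into the 32-bit halves expected by the TDBAL/TDBAH-style
 * register pairs; on systems with a 32-bit bus_addr_t the high half
 * is simply 0.
 */
#if 0
	uint32_t lo = WM_CDTXADDR_LO(txq, 0);	/* low 32 bits of base */
	uint32_t hi = WM_CDTXADDR_HI(txq, 0);	/* high 32 bits, or 0 */
#endif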

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1339 	  "Intel i82801H (AMT) LAN Controller",
   1340 	  WM_T_ICH8,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1342 	  "Intel i82801H LAN Controller",
   1343 	  WM_T_ICH8,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1345 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1346 	  WM_T_ICH8,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1348 	  "Intel i82801H (M) LAN Controller",
   1349 	  WM_T_ICH8,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1351 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1352 	  WM_T_ICH8,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1354 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1355 	  WM_T_ICH8,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1357 	  "82567V-3 LAN Controller",
   1358 	  WM_T_ICH8,		WMP_F_COPPER },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1360 	  "82801I (AMT) LAN Controller",
   1361 	  WM_T_ICH9,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1363 	  "82801I 10/100 LAN Controller",
   1364 	  WM_T_ICH9,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1366 	  "82801I (G) 10/100 LAN Controller",
   1367 	  WM_T_ICH9,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1369 	  "82801I (GT) 10/100 LAN Controller",
   1370 	  WM_T_ICH9,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1372 	  "82801I (C) LAN Controller",
   1373 	  WM_T_ICH9,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1375 	  "82801I mobile LAN Controller",
   1376 	  WM_T_ICH9,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1378 	  "82801I mobile (V) LAN Controller",
   1379 	  WM_T_ICH9,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1381 	  "82801I mobile (AMT) LAN Controller",
   1382 	  WM_T_ICH9,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1384 	  "82567LM-4 LAN Controller",
   1385 	  WM_T_ICH9,		WMP_F_COPPER },
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1387 	  "82567LM-2 LAN Controller",
   1388 	  WM_T_ICH10,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1390 	  "82567LF-2 LAN Controller",
   1391 	  WM_T_ICH10,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1393 	  "82567LM-3 LAN Controller",
   1394 	  WM_T_ICH10,		WMP_F_COPPER },
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1396 	  "82567LF-3 LAN Controller",
   1397 	  WM_T_ICH10,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1399 	  "82567V-2 LAN Controller",
   1400 	  WM_T_ICH10,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1402 	  "82567V-3? LAN Controller",
   1403 	  WM_T_ICH10,		WMP_F_COPPER },
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1405 	  "HANKSVILLE LAN Controller",
   1406 	  WM_T_ICH10,		WMP_F_COPPER },
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1408 	  "PCH LAN (82577LM) Controller",
   1409 	  WM_T_PCH,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1411 	  "PCH LAN (82577LC) Controller",
   1412 	  WM_T_PCH,		WMP_F_COPPER },
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1414 	  "PCH LAN (82578DM) Controller",
   1415 	  WM_T_PCH,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1417 	  "PCH LAN (82578DC) Controller",
   1418 	  WM_T_PCH,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1420 	  "PCH2 LAN (82579LM) Controller",
   1421 	  WM_T_PCH2,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1423 	  "PCH2 LAN (82579V) Controller",
   1424 	  WM_T_PCH2,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1426 	  "82575EB dual-1000baseT Ethernet",
   1427 	  WM_T_82575,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1429 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1430 	  WM_T_82575,		WMP_F_SERDES },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1432 	  "82575GB quad-1000baseT Ethernet",
   1433 	  WM_T_82575,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1435 	  "82575GB quad-1000baseT Ethernet (PM)",
   1436 	  WM_T_82575,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1438 	  "82576 1000BaseT Ethernet",
   1439 	  WM_T_82576,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1441 	  "82576 1000BaseX Ethernet",
   1442 	  WM_T_82576,		WMP_F_FIBER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1445 	  "82576 gigabit Ethernet (SERDES)",
   1446 	  WM_T_82576,		WMP_F_SERDES },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1449 	  "82576 quad-1000BaseT Ethernet",
   1450 	  WM_T_82576,		WMP_F_COPPER },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1453 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1454 	  WM_T_82576,		WMP_F_COPPER },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1457 	  "82576 gigabit Ethernet",
   1458 	  WM_T_82576,		WMP_F_COPPER },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1461 	  "82576 gigabit Ethernet (SERDES)",
   1462 	  WM_T_82576,		WMP_F_SERDES },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1464 	  "82576 quad-gigabit Ethernet (SERDES)",
   1465 	  WM_T_82576,		WMP_F_SERDES },
   1466 
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1468 	  "82580 1000BaseT Ethernet",
   1469 	  WM_T_82580,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1471 	  "82580 1000BaseX Ethernet",
   1472 	  WM_T_82580,		WMP_F_FIBER },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1475 	  "82580 1000BaseT Ethernet (SERDES)",
   1476 	  WM_T_82580,		WMP_F_SERDES },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1479 	  "82580 gigabit Ethernet (SGMII)",
   1480 	  WM_T_82580,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1482 	  "82580 dual-1000BaseT Ethernet",
   1483 	  WM_T_82580,		WMP_F_COPPER },
   1484 
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1486 	  "82580 quad-1000BaseX Ethernet",
   1487 	  WM_T_82580,		WMP_F_FIBER },
   1488 
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1490 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1491 	  WM_T_82580,		WMP_F_COPPER },
   1492 
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1494 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1495 	  WM_T_82580,		WMP_F_SERDES },
   1496 
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1498 	  "DH89XXCC 1000BASE-KX Ethernet",
   1499 	  WM_T_82580,		WMP_F_SERDES },
   1500 
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1502 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1503 	  WM_T_82580,		WMP_F_SERDES },
   1504 
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1506 	  "I350 Gigabit Network Connection",
   1507 	  WM_T_I350,		WMP_F_COPPER },
   1508 
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1510 	  "I350 Gigabit Fiber Network Connection",
   1511 	  WM_T_I350,		WMP_F_FIBER },
   1512 
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1514 	  "I350 Gigabit Backplane Connection",
   1515 	  WM_T_I350,		WMP_F_SERDES },
   1516 
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1518 	  "I350 Quad Port Gigabit Ethernet",
   1519 	  WM_T_I350,		WMP_F_SERDES },
   1520 
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1522 	  "I350 Gigabit Connection",
   1523 	  WM_T_I350,		WMP_F_COPPER },
   1524 
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1526 	  "I354 Gigabit Ethernet (KX)",
   1527 	  WM_T_I354,		WMP_F_SERDES },
   1528 
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1530 	  "I354 Gigabit Ethernet (SGMII)",
   1531 	  WM_T_I354,		WMP_F_COPPER },
   1532 
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1534 	  "I354 Gigabit Ethernet (2.5G)",
   1535 	  WM_T_I354,		WMP_F_COPPER },
   1536 
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1538 	  "I210-T1 Ethernet Server Adapter",
   1539 	  WM_T_I210,		WMP_F_COPPER },
   1540 
   1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1542 	  "I210 Ethernet (Copper OEM)",
   1543 	  WM_T_I210,		WMP_F_COPPER },
   1544 
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1546 	  "I210 Ethernet (Copper IT)",
   1547 	  WM_T_I210,		WMP_F_COPPER },
   1548 
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1550 	  "I210 Ethernet (Copper, FLASH less)",
   1551 	  WM_T_I210,		WMP_F_COPPER },
   1552 
   1553 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1554 	  "I210 Gigabit Ethernet (Fiber)",
   1555 	  WM_T_I210,		WMP_F_FIBER },
   1556 
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1558 	  "I210 Gigabit Ethernet (SERDES)",
   1559 	  WM_T_I210,		WMP_F_SERDES },
   1560 
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1562 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1563 	  WM_T_I210,		WMP_F_SERDES },
   1564 
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1566 	  "I210 Gigabit Ethernet (SGMII)",
   1567 	  WM_T_I210,		WMP_F_COPPER },
   1568 
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1570 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1571 	  WM_T_I210,		WMP_F_COPPER },
   1572 
   1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1574 	  "I211 Ethernet (COPPER)",
   1575 	  WM_T_I211,		WMP_F_COPPER },
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1577 	  "I217 V Ethernet Connection",
   1578 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1580 	  "I217 LM Ethernet Connection",
   1581 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1583 	  "I218 V Ethernet Connection",
   1584 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1585 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1586 	  "I218 V Ethernet Connection",
   1587 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1589 	  "I218 V Ethernet Connection",
   1590 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1592 	  "I218 LM Ethernet Connection",
   1593 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1595 	  "I218 LM Ethernet Connection",
   1596 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1597 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1598 	  "I218 LM Ethernet Connection",
   1599 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1601 	  "I219 LM Ethernet Connection",
   1602 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1604 	  "I219 LM (2) Ethernet Connection",
   1605 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1607 	  "I219 LM (3) Ethernet Connection",
   1608 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1609 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1610 	  "I219 LM (4) Ethernet Connection",
   1611 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1613 	  "I219 LM (5) Ethernet Connection",
   1614 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1616 	  "I219 LM (6) Ethernet Connection",
   1617 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1619 	  "I219 LM (7) Ethernet Connection",
   1620 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1622 	  "I219 LM (8) Ethernet Connection",
   1623 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1625 	  "I219 LM (9) Ethernet Connection",
   1626 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1628 	  "I219 LM (10) Ethernet Connection",
   1629 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1631 	  "I219 LM (11) Ethernet Connection",
   1632 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1634 	  "I219 LM (12) Ethernet Connection",
   1635 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1637 	  "I219 LM (13) Ethernet Connection",
   1638 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1640 	  "I219 LM (14) Ethernet Connection",
   1641 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1643 	  "I219 LM (15) Ethernet Connection",
   1644 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1646 	  "I219 LM (16) Ethernet Connection",
   1647 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1649 	  "I219 LM (17) Ethernet Connection",
   1650 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1652 	  "I219 LM (18) Ethernet Connection",
   1653 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1655 	  "I219 LM (19) Ethernet Connection",
   1656 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1658 	  "I219 V Ethernet Connection",
   1659 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1661 	  "I219 V (2) Ethernet Connection",
   1662 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1664 	  "I219 V (4) Ethernet Connection",
   1665 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1667 	  "I219 V (5) Ethernet Connection",
   1668 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1670 	  "I219 V (6) Ethernet Connection",
   1671 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1673 	  "I219 V (7) Ethernet Connection",
   1674 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1675 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1676 	  "I219 V (8) Ethernet Connection",
   1677 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1679 	  "I219 V (9) Ethernet Connection",
   1680 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1682 	  "I219 V (10) Ethernet Connection",
   1683 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1685 	  "I219 V (11) Ethernet Connection",
   1686 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1687 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1688 	  "I219 V (12) Ethernet Connection",
   1689 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1691 	  "I219 V (13) Ethernet Connection",
   1692 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1694 	  "I219 V (14) Ethernet Connection",
   1695 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1697 	  "I219 V (15) Ethernet Connection",
   1698 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1699 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1700 	  "I219 V (16) Ethernet Connection",
   1701 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1703 	  "I219 V (17) Ethernet Connection",
   1704 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1706 	  "I219 V (18) Ethernet Connection",
   1707 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1709 	  "I219 V (19) Ethernet Connection",
   1710 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1711 	{ 0,			0,
   1712 	  NULL,
   1713 	  0,			0 },
   1714 };
   1715 
   1716 /*
    1717  * Register read/write functions,
    1718  * other than CSR_{READ|WRITE}().
   1719  */
   1720 
   1721 #if 0 /* Not currently used */
   1722 static inline uint32_t
   1723 wm_io_read(struct wm_softc *sc, int reg)
   1724 {
   1725 
   1726 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1727 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1728 }
   1729 #endif
   1730 
   1731 static inline void
   1732 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1733 {
   1734 
   1735 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1736 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1737 }
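
/*
 * Usage sketch (added for illustration; not in the original source).
 * The two bus_space writes above form an indirect window: the register
 * offset is latched at byte 0 of the I/O BAR and the data goes through
 * byte 4.  A hypothetical caller would guard on WM_F_IOH_VALID, since
 * the I/O BAR is only mapped on the chip versions that need this
 * access method.
 */
#if 0
	if ((sc->sc_flags & WM_F_IOH_VALID) != 0)
		wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
#endif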
   1738 
   1739 static inline void
   1740 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1741     uint32_t data)
   1742 {
   1743 	uint32_t regval;
   1744 	int i;
   1745 
   1746 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1747 
   1748 	CSR_WRITE(sc, reg, regval);
   1749 
   1750 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1751 		delay(5);
   1752 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1753 			break;
   1754 	}
   1755 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1756 		aprint_error("%s: WARNING:"
   1757 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1758 		    device_xname(sc->sc_dev), reg);
   1759 	}
   1760 }
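
/*
 * Usage sketch (added for illustration; not in the original source).
 * The helper above packs an 8-bit payload and a sub-register offset
 * into a single CSR write and then polls for SCTL_CTL_READY.  The
 * register and values below are placeholders, not taken from a real
 * caller.
 */
#if 0
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0xff);
#endif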
   1761 
   1762 static inline void
   1763 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1764 {
   1765 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1766 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1767 }
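
/*
 * Worked example (added for illustration): for a 64-bit bus address
 * v == 0x0000000123456780, BUS_ADDR_LO32(v) == 0x23456780 and
 * BUS_ADDR_HI32(v) == 0x00000001; both halves are stored little-endian
 * so the hardware reads the same descriptor address regardless of host
 * byte order.
 */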
   1768 
   1769 /*
   1770  * Descriptor sync/init functions.
   1771  */
   1772 static inline void
   1773 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1774 {
   1775 	struct wm_softc *sc = txq->txq_sc;
   1776 
   1777 	/* If it will wrap around, sync to the end of the ring. */
   1778 	if ((start + num) > WM_NTXDESC(txq)) {
   1779 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1780 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1781 		    (WM_NTXDESC(txq) - start), ops);
   1782 		num -= (WM_NTXDESC(txq) - start);
   1783 		start = 0;
   1784 	}
   1785 
   1786 	/* Now sync whatever is left. */
   1787 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1788 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1789 }
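
/*
 * Worked example (added for illustration): with WM_NTXDESC(txq) == 256,
 * start == 250 and num == 10, the first bus_dmamap_sync() above covers
 * descriptors 250..255 (6 entries) and the second covers 0..3 (the
 * remaining 4), so a wrapped range is always synced in two pieces.
 */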
   1790 
   1791 static inline void
   1792 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1793 {
   1794 	struct wm_softc *sc = rxq->rxq_sc;
   1795 
   1796 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1797 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1798 }
   1799 
   1800 static inline void
   1801 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1802 {
   1803 	struct wm_softc *sc = rxq->rxq_sc;
   1804 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1805 	struct mbuf *m = rxs->rxs_mbuf;
   1806 
   1807 	/*
   1808 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1809 	 * so that the payload after the Ethernet header is aligned
   1810 	 * to a 4-byte boundary.
   1811 
    1812 	 *
   1813 	 * The stupid chip uses the same size for every buffer, which
   1814 	 * is set in the Receive Control register.  We are using the 2K
   1815 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1816 	 * reason, we can't "scoot" packets longer than the standard
   1817 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1818 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1819 	 * the upper layer copy the headers.
   1820 	 */
   1821 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1822 
   1823 	if (sc->sc_type == WM_T_82574) {
   1824 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1825 		rxd->erx_data.erxd_addr =
   1826 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1827 		rxd->erx_data.erxd_dd = 0;
   1828 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1829 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1830 
   1831 		rxd->nqrx_data.nrxd_paddr =
   1832 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1833 		/* Currently, split header is not supported. */
   1834 		rxd->nqrx_data.nrxd_haddr = 0;
   1835 	} else {
   1836 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1837 
   1838 		wm_set_dma_addr(&rxd->wrx_addr,
   1839 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1840 		rxd->wrx_len = 0;
   1841 		rxd->wrx_cksum = 0;
   1842 		rxd->wrx_status = 0;
   1843 		rxd->wrx_errors = 0;
   1844 		rxd->wrx_special = 0;
   1845 	}
   1846 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1847 
   1848 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1849 }
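
/*
 * Worked example (added for illustration): with sc_align_tweak == 2,
 * the 14-byte Ethernet header occupies buffer offsets 2..15, so the
 * IP header that follows starts at offset 16, a 4-byte boundary.
 * With sc_align_tweak == 0 (frames that wouldn't fit in 2K - 2 on
 * strict-alignment machines), the upper layers copy the headers
 * instead.
 */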
   1850 
   1851 /*
   1852  * Device driver interface functions and commonly used functions.
   1853  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1854  */
   1855 
   1856 /* Lookup supported device table */
   1857 static const struct wm_product *
   1858 wm_lookup(const struct pci_attach_args *pa)
   1859 {
   1860 	const struct wm_product *wmp;
   1861 
   1862 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1863 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1864 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1865 			return wmp;
   1866 	}
   1867 	return NULL;
   1868 }
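
/*
 * Note (added): the loop above relies on the all-zero sentinel entry at
 * the end of wm_products -- wmp_name == NULL terminates the scan -- so
 * any new device entry must be added above the sentinel.
 */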
   1869 
   1870 /* The match function (ca_match) */
   1871 static int
   1872 wm_match(device_t parent, cfdata_t cf, void *aux)
   1873 {
   1874 	struct pci_attach_args *pa = aux;
   1875 
   1876 	if (wm_lookup(pa) != NULL)
   1877 		return 1;
   1878 
   1879 	return 0;
   1880 }
   1881 
   1882 /* The attach function (ca_attach) */
   1883 static void
   1884 wm_attach(device_t parent, device_t self, void *aux)
   1885 {
   1886 	struct wm_softc *sc = device_private(self);
   1887 	struct pci_attach_args *pa = aux;
   1888 	prop_dictionary_t dict;
   1889 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1890 	pci_chipset_tag_t pc = pa->pa_pc;
   1891 	int counts[PCI_INTR_TYPE_SIZE];
   1892 	pci_intr_type_t max_type;
   1893 	const char *eetype, *xname;
   1894 	bus_space_tag_t memt;
   1895 	bus_space_handle_t memh;
   1896 	bus_size_t memsize;
   1897 	int memh_valid;
   1898 	int i, error;
   1899 	const struct wm_product *wmp;
   1900 	prop_data_t ea;
   1901 	prop_number_t pn;
   1902 	uint8_t enaddr[ETHER_ADDR_LEN];
   1903 	char buf[256];
   1904 	char wqname[MAXCOMLEN];
   1905 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1906 	pcireg_t preg, memtype;
   1907 	uint16_t eeprom_data, apme_mask;
   1908 	bool force_clear_smbi;
   1909 	uint32_t link_mode;
   1910 	uint32_t reg;
   1911 
   1912 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1913 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1914 #endif
   1915 	sc->sc_dev = self;
   1916 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1917 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1918 	sc->sc_core_stopping = false;
   1919 
   1920 	wmp = wm_lookup(pa);
   1921 #ifdef DIAGNOSTIC
   1922 	if (wmp == NULL) {
   1923 		printf("\n");
   1924 		panic("wm_attach: impossible");
   1925 	}
   1926 #endif
   1927 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1928 
   1929 	sc->sc_pc = pa->pa_pc;
   1930 	sc->sc_pcitag = pa->pa_tag;
   1931 
   1932 	if (pci_dma64_available(pa))
   1933 		sc->sc_dmat = pa->pa_dmat64;
   1934 	else
   1935 		sc->sc_dmat = pa->pa_dmat;
   1936 
   1937 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1938 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1939 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1940 
   1941 	sc->sc_type = wmp->wmp_type;
   1942 
   1943 	/* Set default function pointers */
   1944 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1945 	sc->phy.release = sc->nvm.release = wm_put_null;
   1946 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1947 
   1948 	if (sc->sc_type < WM_T_82543) {
   1949 		if (sc->sc_rev < 2) {
   1950 			aprint_error_dev(sc->sc_dev,
   1951 			    "i82542 must be at least rev. 2\n");
   1952 			return;
   1953 		}
   1954 		if (sc->sc_rev < 3)
   1955 			sc->sc_type = WM_T_82542_2_0;
   1956 	}
   1957 
   1958 	/*
   1959 	 * Disable MSI for Errata:
   1960 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1961 	 *
   1962 	 *  82544: Errata 25
   1963 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1964 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1965 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1966 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1967 	 *
   1968 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1969 	 *
   1970 	 *  82571 & 82572: Errata 63
   1971 	 */
   1972 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1973 	    || (sc->sc_type == WM_T_82572))
   1974 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1975 
   1976 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1977 	    || (sc->sc_type == WM_T_82580)
   1978 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1979 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1980 		sc->sc_flags |= WM_F_NEWQUEUE;
   1981 
   1982 	/* Set device properties (mactype) */
   1983 	dict = device_properties(sc->sc_dev);
   1984 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1985 
   1986 	/*
    1987 	 * Map the device.  All devices support memory-mapped access,
   1988 	 * and it is really required for normal operation.
   1989 	 */
   1990 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1991 	switch (memtype) {
   1992 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1993 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1994 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1995 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1996 		break;
   1997 	default:
   1998 		memh_valid = 0;
   1999 		break;
   2000 	}
   2001 
   2002 	if (memh_valid) {
   2003 		sc->sc_st = memt;
   2004 		sc->sc_sh = memh;
   2005 		sc->sc_ss = memsize;
   2006 	} else {
   2007 		aprint_error_dev(sc->sc_dev,
   2008 		    "unable to map device registers\n");
   2009 		return;
   2010 	}
   2011 
   2012 	/*
   2013 	 * In addition, i82544 and later support I/O mapped indirect
   2014 	 * register access.  It is not desirable (nor supported in
   2015 	 * this driver) to use it for normal operation, though it is
   2016 	 * required to work around bugs in some chip versions.
   2017 	 */
   2018 	switch (sc->sc_type) {
   2019 	case WM_T_82544:
   2020 	case WM_T_82541:
   2021 	case WM_T_82541_2:
   2022 	case WM_T_82547:
   2023 	case WM_T_82547_2:
   2024 		/* First we have to find the I/O BAR. */
   2025 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2026 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2027 			if (memtype == PCI_MAPREG_TYPE_IO)
   2028 				break;
   2029 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2030 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2031 				i += 4;	/* skip high bits, too */
   2032 		}
   2033 		if (i < PCI_MAPREG_END) {
   2034 			/*
    2035 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2036 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2037 			 * That's not a problem, because the newer chips
    2038 			 * don't have this bug.
   2039 			 *
    2040 			 * The i8254x apparently doesn't respond when the
    2041 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2042 			 * been configured.
   2043 			 */
   2044 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2045 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2046 				aprint_error_dev(sc->sc_dev,
   2047 				    "WARNING: I/O BAR at zero.\n");
   2048 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2049 					0, &sc->sc_iot, &sc->sc_ioh,
   2050 					NULL, &sc->sc_ios) == 0) {
   2051 				sc->sc_flags |= WM_F_IOH_VALID;
   2052 			} else
   2053 				aprint_error_dev(sc->sc_dev,
   2054 				    "WARNING: unable to map I/O space\n");
   2055 		}
   2056 		break;
   2057 	default:
   2058 		break;
   2059 	}
   2060 
   2061 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2062 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2063 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2064 	if (sc->sc_type < WM_T_82542_2_1)
   2065 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2066 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2067 
   2068 	/* Power up chip */
   2069 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2070 	    && error != EOPNOTSUPP) {
   2071 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2072 		return;
   2073 	}
   2074 
   2075 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2076 	/*
    2077 	 * To save interrupt resources, don't use MSI-X if we can
    2078 	 * use only one queue.
   2079 	 */
   2080 	if (sc->sc_nqueues > 1) {
   2081 		max_type = PCI_INTR_TYPE_MSIX;
   2082 		/*
    2083 		 * The 82583 has an MSI-X capability in the PCI configuration
    2084 		 * space, but it doesn't actually support MSI-X. At least the
    2085 		 * documentation doesn't say anything about it.
   2086 		 */
   2087 		counts[PCI_INTR_TYPE_MSIX]
   2088 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2089 	} else {
   2090 		max_type = PCI_INTR_TYPE_MSI;
   2091 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2092 	}
   2093 
   2094 	/* Allocation settings */
   2095 	counts[PCI_INTR_TYPE_MSI] = 1;
   2096 	counts[PCI_INTR_TYPE_INTX] = 1;
   2097 	/* overridden by disable flags */
   2098 	if (wm_disable_msi != 0) {
   2099 		counts[PCI_INTR_TYPE_MSI] = 0;
   2100 		if (wm_disable_msix != 0) {
   2101 			max_type = PCI_INTR_TYPE_INTX;
   2102 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2103 		}
   2104 	} else if (wm_disable_msix != 0) {
   2105 		max_type = PCI_INTR_TYPE_MSI;
   2106 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2107 	}
   2108 
   2109 alloc_retry:
   2110 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2111 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2112 		return;
   2113 	}
   2114 
   2115 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2116 		error = wm_setup_msix(sc);
   2117 		if (error) {
   2118 			pci_intr_release(pc, sc->sc_intrs,
   2119 			    counts[PCI_INTR_TYPE_MSIX]);
   2120 
   2121 			/* Setup for MSI: Disable MSI-X */
   2122 			max_type = PCI_INTR_TYPE_MSI;
   2123 			counts[PCI_INTR_TYPE_MSI] = 1;
   2124 			counts[PCI_INTR_TYPE_INTX] = 1;
   2125 			goto alloc_retry;
   2126 		}
   2127 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2128 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2129 		error = wm_setup_legacy(sc);
   2130 		if (error) {
   2131 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2132 			    counts[PCI_INTR_TYPE_MSI]);
   2133 
   2134 			/* The next try is for INTx: Disable MSI */
   2135 			max_type = PCI_INTR_TYPE_INTX;
   2136 			counts[PCI_INTR_TYPE_INTX] = 1;
   2137 			goto alloc_retry;
   2138 		}
   2139 	} else {
   2140 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2141 		error = wm_setup_legacy(sc);
   2142 		if (error) {
   2143 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2144 			    counts[PCI_INTR_TYPE_INTX]);
   2145 			return;
   2146 		}
   2147 	}
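
	/*
	 * Note (added): the ladder above degrades gracefully, from MSI-X
	 * with (nqueues + 1) vectors, to a single MSI vector, to INTx.
	 * Each failed setup releases its vectors via pci_intr_release()
	 * before shrinking counts[] and jumping back to alloc_retry, so
	 * no interrupt resources are leaked along the way.
	 */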
   2148 
   2149 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2150 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2151 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2152 	    WM_WORKQUEUE_FLAGS);
   2153 	if (error) {
   2154 		aprint_error_dev(sc->sc_dev,
   2155 		    "unable to create workqueue\n");
   2156 		goto out;
   2157 	}
   2158 
   2159 	/*
   2160 	 * Check the function ID (unit number of the chip).
   2161 	 */
   2162 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2163 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2164 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2165 	    || (sc->sc_type == WM_T_82580)
   2166 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2167 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2168 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2169 	else
   2170 		sc->sc_funcid = 0;
   2171 
   2172 	/*
   2173 	 * Determine a few things about the bus we're connected to.
   2174 	 */
   2175 	if (sc->sc_type < WM_T_82543) {
   2176 		/* We don't really know the bus characteristics here. */
   2177 		sc->sc_bus_speed = 33;
   2178 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2179 		/*
   2180 		 * CSA (Communication Streaming Architecture) is about as fast
    2181 		 * as a 32-bit 66MHz PCI bus.
   2182 		 */
   2183 		sc->sc_flags |= WM_F_CSA;
   2184 		sc->sc_bus_speed = 66;
   2185 		aprint_verbose_dev(sc->sc_dev,
   2186 		    "Communication Streaming Architecture\n");
   2187 		if (sc->sc_type == WM_T_82547) {
   2188 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2189 			callout_setfunc(&sc->sc_txfifo_ch,
   2190 			    wm_82547_txfifo_stall, sc);
   2191 			aprint_verbose_dev(sc->sc_dev,
   2192 			    "using 82547 Tx FIFO stall work-around\n");
   2193 		}
   2194 	} else if (sc->sc_type >= WM_T_82571) {
   2195 		sc->sc_flags |= WM_F_PCIE;
   2196 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2197 		    && (sc->sc_type != WM_T_ICH10)
   2198 		    && (sc->sc_type != WM_T_PCH)
   2199 		    && (sc->sc_type != WM_T_PCH2)
   2200 		    && (sc->sc_type != WM_T_PCH_LPT)
   2201 		    && (sc->sc_type != WM_T_PCH_SPT)
   2202 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2203 			/* ICH* and PCH* have no PCIe capability registers */
   2204 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2205 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2206 				NULL) == 0)
   2207 				aprint_error_dev(sc->sc_dev,
   2208 				    "unable to find PCIe capability\n");
   2209 		}
   2210 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2211 	} else {
   2212 		reg = CSR_READ(sc, WMREG_STATUS);
   2213 		if (reg & STATUS_BUS64)
   2214 			sc->sc_flags |= WM_F_BUS64;
   2215 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2216 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2217 
   2218 			sc->sc_flags |= WM_F_PCIX;
   2219 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2220 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2221 				aprint_error_dev(sc->sc_dev,
   2222 				    "unable to find PCIX capability\n");
   2223 			else if (sc->sc_type != WM_T_82545_3 &&
   2224 				 sc->sc_type != WM_T_82546_3) {
   2225 				/*
   2226 				 * Work around a problem caused by the BIOS
   2227 				 * setting the max memory read byte count
   2228 				 * incorrectly.
   2229 				 */
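				/*
				 * Worked example (added for
				 * illustration): the fields encode
				 * 512 << n bytes, so if the BIOS set
				 * bytecnt == 2 (2048 bytes) while the
				 * device caps maxb at 1 (1024 bytes),
				 * MMRBC is rewritten from 2048 down
				 * to 1024.
				 */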
   2230 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2231 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2232 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2233 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2234 
   2235 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2236 				    PCIX_CMD_BYTECNT_SHIFT;
   2237 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2238 				    PCIX_STATUS_MAXB_SHIFT;
   2239 				if (bytecnt > maxb) {
   2240 					aprint_verbose_dev(sc->sc_dev,
   2241 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2242 					    512 << bytecnt, 512 << maxb);
   2243 					pcix_cmd = (pcix_cmd &
   2244 					    ~PCIX_CMD_BYTECNT_MASK) |
   2245 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2246 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2247 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2248 					    pcix_cmd);
   2249 				}
   2250 			}
   2251 		}
   2252 		/*
   2253 		 * The quad port adapter is special; it has a PCIX-PCIX
   2254 		 * bridge on the board, and can run the secondary bus at
   2255 		 * a higher speed.
   2256 		 */
   2257 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2258 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2259 								      : 66;
   2260 		} else if (sc->sc_flags & WM_F_PCIX) {
   2261 			switch (reg & STATUS_PCIXSPD_MASK) {
   2262 			case STATUS_PCIXSPD_50_66:
   2263 				sc->sc_bus_speed = 66;
   2264 				break;
   2265 			case STATUS_PCIXSPD_66_100:
   2266 				sc->sc_bus_speed = 100;
   2267 				break;
   2268 			case STATUS_PCIXSPD_100_133:
   2269 				sc->sc_bus_speed = 133;
   2270 				break;
   2271 			default:
   2272 				aprint_error_dev(sc->sc_dev,
   2273 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2274 				    reg & STATUS_PCIXSPD_MASK);
   2275 				sc->sc_bus_speed = 66;
   2276 				break;
   2277 			}
   2278 		} else
   2279 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2280 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2281 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2282 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2283 	}
   2284 
   2285 	/* clear interesting stat counters */
   2286 	CSR_READ(sc, WMREG_COLC);
   2287 	CSR_READ(sc, WMREG_RXERRC);
   2288 
   2289 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2290 	    || (sc->sc_type >= WM_T_ICH8))
   2291 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2292 	if (sc->sc_type >= WM_T_ICH8)
   2293 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2294 
   2295 	/* Set PHY, NVM mutex related stuff */
   2296 	switch (sc->sc_type) {
   2297 	case WM_T_82542_2_0:
   2298 	case WM_T_82542_2_1:
   2299 	case WM_T_82543:
   2300 	case WM_T_82544:
   2301 		/* Microwire */
   2302 		sc->nvm.read = wm_nvm_read_uwire;
   2303 		sc->sc_nvm_wordsize = 64;
   2304 		sc->sc_nvm_addrbits = 6;
   2305 		break;
   2306 	case WM_T_82540:
   2307 	case WM_T_82545:
   2308 	case WM_T_82545_3:
   2309 	case WM_T_82546:
   2310 	case WM_T_82546_3:
   2311 		/* Microwire */
   2312 		sc->nvm.read = wm_nvm_read_uwire;
   2313 		reg = CSR_READ(sc, WMREG_EECD);
   2314 		if (reg & EECD_EE_SIZE) {
   2315 			sc->sc_nvm_wordsize = 256;
   2316 			sc->sc_nvm_addrbits = 8;
   2317 		} else {
   2318 			sc->sc_nvm_wordsize = 64;
   2319 			sc->sc_nvm_addrbits = 6;
   2320 		}
   2321 		sc->sc_flags |= WM_F_LOCK_EECD;
   2322 		sc->nvm.acquire = wm_get_eecd;
   2323 		sc->nvm.release = wm_put_eecd;
   2324 		break;
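
	/*
	 * Note (added): for the Microwire parts above, EECD_EE_SIZE
	 * selects between a 256-word/8-address-bit EEPROM and the
	 * 64-word/6-address-bit default; the 8254[17] case below makes
	 * the same choice from EECD_EE_ABITS instead.
	 */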
   2325 	case WM_T_82541:
   2326 	case WM_T_82541_2:
   2327 	case WM_T_82547:
   2328 	case WM_T_82547_2:
   2329 		reg = CSR_READ(sc, WMREG_EECD);
   2330 		/*
    2331 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI EEPROM
    2332 		 * only on 8254[17], so set flags and functions before calling it.
   2333 		 */
   2334 		sc->sc_flags |= WM_F_LOCK_EECD;
   2335 		sc->nvm.acquire = wm_get_eecd;
   2336 		sc->nvm.release = wm_put_eecd;
   2337 		if (reg & EECD_EE_TYPE) {
   2338 			/* SPI */
   2339 			sc->nvm.read = wm_nvm_read_spi;
   2340 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2341 			wm_nvm_set_addrbits_size_eecd(sc);
   2342 		} else {
   2343 			/* Microwire */
   2344 			sc->nvm.read = wm_nvm_read_uwire;
   2345 			if ((reg & EECD_EE_ABITS) != 0) {
   2346 				sc->sc_nvm_wordsize = 256;
   2347 				sc->sc_nvm_addrbits = 8;
   2348 			} else {
   2349 				sc->sc_nvm_wordsize = 64;
   2350 				sc->sc_nvm_addrbits = 6;
   2351 			}
   2352 		}
   2353 		break;
   2354 	case WM_T_82571:
   2355 	case WM_T_82572:
   2356 		/* SPI */
   2357 		sc->nvm.read = wm_nvm_read_eerd;
    2358 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2359 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2360 		wm_nvm_set_addrbits_size_eecd(sc);
   2361 		sc->phy.acquire = wm_get_swsm_semaphore;
   2362 		sc->phy.release = wm_put_swsm_semaphore;
   2363 		sc->nvm.acquire = wm_get_nvm_82571;
   2364 		sc->nvm.release = wm_put_nvm_82571;
   2365 		break;
   2366 	case WM_T_82573:
   2367 	case WM_T_82574:
   2368 	case WM_T_82583:
   2369 		sc->nvm.read = wm_nvm_read_eerd;
    2370 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2371 		if (sc->sc_type == WM_T_82573) {
   2372 			sc->phy.acquire = wm_get_swsm_semaphore;
   2373 			sc->phy.release = wm_put_swsm_semaphore;
   2374 			sc->nvm.acquire = wm_get_nvm_82571;
   2375 			sc->nvm.release = wm_put_nvm_82571;
   2376 		} else {
   2377 			/* Both PHY and NVM use the same semaphore. */
   2378 			sc->phy.acquire = sc->nvm.acquire
   2379 			    = wm_get_swfwhw_semaphore;
   2380 			sc->phy.release = sc->nvm.release
   2381 			    = wm_put_swfwhw_semaphore;
   2382 		}
   2383 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2384 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2385 			sc->sc_nvm_wordsize = 2048;
   2386 		} else {
   2387 			/* SPI */
   2388 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2389 			wm_nvm_set_addrbits_size_eecd(sc);
   2390 		}
   2391 		break;
   2392 	case WM_T_82575:
   2393 	case WM_T_82576:
   2394 	case WM_T_82580:
   2395 	case WM_T_I350:
   2396 	case WM_T_I354:
   2397 	case WM_T_80003:
   2398 		/* SPI */
   2399 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2400 		wm_nvm_set_addrbits_size_eecd(sc);
   2401 		if ((sc->sc_type == WM_T_80003)
   2402 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2403 			sc->nvm.read = wm_nvm_read_eerd;
   2404 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2405 		} else {
   2406 			sc->nvm.read = wm_nvm_read_spi;
   2407 			sc->sc_flags |= WM_F_LOCK_EECD;
   2408 		}
   2409 		sc->phy.acquire = wm_get_phy_82575;
   2410 		sc->phy.release = wm_put_phy_82575;
   2411 		sc->nvm.acquire = wm_get_nvm_80003;
   2412 		sc->nvm.release = wm_put_nvm_80003;
   2413 		break;
   2414 	case WM_T_ICH8:
   2415 	case WM_T_ICH9:
   2416 	case WM_T_ICH10:
   2417 	case WM_T_PCH:
   2418 	case WM_T_PCH2:
   2419 	case WM_T_PCH_LPT:
   2420 		sc->nvm.read = wm_nvm_read_ich8;
   2421 		/* FLASH */
   2422 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2423 		sc->sc_nvm_wordsize = 2048;
   2424 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2425 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2426 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2427 			aprint_error_dev(sc->sc_dev,
   2428 			    "can't map FLASH registers\n");
   2429 			goto out;
   2430 		}
   2431 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2432 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2433 		    ICH_FLASH_SECTOR_SIZE;
   2434 		sc->sc_ich8_flash_bank_size =
   2435 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2436 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2437 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2438 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
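		/*
		 * Worked example (added for illustration, assuming the
		 * usual 4 KB ICH_FLASH_SECTOR_SIZE): GFPREG == 0x001f0001
		 * gives base sector 1 and limit sector 0x1f, so each of
		 * the two banks spans (0x1f + 1 - 1) == 31 sectors, i.e.
		 * 31 * 4096 / (2 * 2) == 31744 16-bit words per bank.
		 */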
   2439 		sc->sc_flashreg_offset = 0;
   2440 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2441 		sc->phy.release = wm_put_swflag_ich8lan;
   2442 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2443 		sc->nvm.release = wm_put_nvm_ich8lan;
   2444 		break;
   2445 	case WM_T_PCH_SPT:
   2446 	case WM_T_PCH_CNP:
   2447 		sc->nvm.read = wm_nvm_read_spt;
   2448 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2449 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2450 		sc->sc_flasht = sc->sc_st;
   2451 		sc->sc_flashh = sc->sc_sh;
   2452 		sc->sc_ich8_flash_base = 0;
   2453 		sc->sc_nvm_wordsize =
   2454 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2455 		    * NVM_SIZE_MULTIPLIER;
    2456 		/* It's the size in bytes; we want it in words */
   2457 		sc->sc_nvm_wordsize /= 2;
   2458 		/* Assume 2 banks */
   2459 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
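		/*
		 * Worked example (added for illustration, assuming a
		 * 4 KB NVM_SIZE_MULTIPLIER): a STRAP size field of 0x0f
		 * gives (0x0f + 1) * 4096 == 65536 bytes == 32768 words,
		 * so with the two-bank assumption each bank holds 16384
		 * words.
		 */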
   2460 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2461 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2462 		sc->phy.release = wm_put_swflag_ich8lan;
   2463 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2464 		sc->nvm.release = wm_put_nvm_ich8lan;
   2465 		break;
   2466 	case WM_T_I210:
   2467 	case WM_T_I211:
    2468 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2469 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2470 		if (wm_nvm_flash_presence_i210(sc)) {
   2471 			sc->nvm.read = wm_nvm_read_eerd;
   2472 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2473 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2474 			wm_nvm_set_addrbits_size_eecd(sc);
   2475 		} else {
   2476 			sc->nvm.read = wm_nvm_read_invm;
   2477 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2478 			sc->sc_nvm_wordsize = INVM_SIZE;
   2479 		}
   2480 		sc->phy.acquire = wm_get_phy_82575;
   2481 		sc->phy.release = wm_put_phy_82575;
   2482 		sc->nvm.acquire = wm_get_nvm_80003;
   2483 		sc->nvm.release = wm_put_nvm_80003;
   2484 		break;
   2485 	default:
   2486 		break;
   2487 	}
   2488 
   2489 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2490 	switch (sc->sc_type) {
   2491 	case WM_T_82571:
   2492 	case WM_T_82572:
   2493 		reg = CSR_READ(sc, WMREG_SWSM2);
   2494 		if ((reg & SWSM2_LOCK) == 0) {
   2495 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2496 			force_clear_smbi = true;
   2497 		} else
   2498 			force_clear_smbi = false;
   2499 		break;
   2500 	case WM_T_82573:
   2501 	case WM_T_82574:
   2502 	case WM_T_82583:
   2503 		force_clear_smbi = true;
   2504 		break;
   2505 	default:
   2506 		force_clear_smbi = false;
   2507 		break;
   2508 	}
   2509 	if (force_clear_smbi) {
   2510 		reg = CSR_READ(sc, WMREG_SWSM);
   2511 		if ((reg & SWSM_SMBI) != 0)
   2512 			aprint_error_dev(sc->sc_dev,
   2513 			    "Please update the Bootagent\n");
   2514 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2515 	}
   2516 
   2517 	/*
    2518 	 * Defer printing the EEPROM type until after verifying the checksum.
   2519 	 * This allows the EEPROM type to be printed correctly in the case
   2520 	 * that no EEPROM is attached.
   2521 	 */
   2522 	/*
   2523 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2524 	 * this for later, so we can fail future reads from the EEPROM.
   2525 	 */
   2526 	if (wm_nvm_validate_checksum(sc)) {
   2527 		/*
    2528 		 * Check again, because some PCI-e parts fail the
    2529 		 * first check due to the link being in a sleep state.
   2530 		 */
   2531 		if (wm_nvm_validate_checksum(sc))
   2532 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2533 	}
   2534 
   2535 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2536 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2537 	else {
   2538 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2539 		    sc->sc_nvm_wordsize);
   2540 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2541 			aprint_verbose("iNVM");
   2542 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2543 			aprint_verbose("FLASH(HW)");
   2544 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2545 			aprint_verbose("FLASH");
   2546 		else {
   2547 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2548 				eetype = "SPI";
   2549 			else
   2550 				eetype = "MicroWire";
   2551 			aprint_verbose("(%d address bits) %s EEPROM",
   2552 			    sc->sc_nvm_addrbits, eetype);
   2553 		}
   2554 	}
   2555 	wm_nvm_version(sc);
   2556 	aprint_verbose("\n");
   2557 
   2558 	/*
   2559 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2560 	 * incorrect.
   2561 	 */
   2562 	wm_gmii_setup_phytype(sc, 0, 0);
   2563 
   2564 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2565 	switch (sc->sc_type) {
   2566 	case WM_T_ICH8:
   2567 	case WM_T_ICH9:
   2568 	case WM_T_ICH10:
   2569 	case WM_T_PCH:
   2570 	case WM_T_PCH2:
   2571 	case WM_T_PCH_LPT:
   2572 	case WM_T_PCH_SPT:
   2573 	case WM_T_PCH_CNP:
   2574 		apme_mask = WUC_APME;
   2575 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2576 		if ((eeprom_data & apme_mask) != 0)
   2577 			sc->sc_flags |= WM_F_WOL;
   2578 		break;
   2579 	default:
   2580 		break;
   2581 	}
   2582 
   2583 	/* Reset the chip to a known state. */
   2584 	wm_reset(sc);
   2585 
   2586 	/*
   2587 	 * Check for I21[01] PLL workaround.
   2588 	 *
   2589 	 * Three cases:
   2590 	 * a) Chip is I211.
   2591 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2592 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2593 	 */
   2594 	if (sc->sc_type == WM_T_I211)
   2595 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2596 	if (sc->sc_type == WM_T_I210) {
   2597 		if (!wm_nvm_flash_presence_i210(sc))
   2598 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2599 		else if ((sc->sc_nvm_ver_major < 3)
   2600 		    || ((sc->sc_nvm_ver_major == 3)
   2601 			&& (sc->sc_nvm_ver_minor < 25))) {
   2602 			aprint_verbose_dev(sc->sc_dev,
   2603 			    "ROM image version %d.%d is older than 3.25\n",
   2604 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2605 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2606 		}
   2607 	}
   2608 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2609 		wm_pll_workaround_i210(sc);
   2610 
   2611 	wm_get_wakeup(sc);
   2612 
   2613 	/* Non-AMT based hardware can now take control from firmware */
   2614 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2615 		wm_get_hw_control(sc);
   2616 
   2617 	/*
    2618 	 * Read the Ethernet address from the EEPROM, if it wasn't
    2619 	 * found first in the device properties.
   2620 	 */
   2621 	ea = prop_dictionary_get(dict, "mac-address");
   2622 	if (ea != NULL) {
   2623 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2624 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2625 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2626 	} else {
   2627 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2628 			aprint_error_dev(sc->sc_dev,
   2629 			    "unable to read Ethernet address\n");
   2630 			goto out;
   2631 		}
   2632 	}
   2633 
   2634 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2635 	    ether_sprintf(enaddr));
   2636 
   2637 	/*
   2638 	 * Read the config info from the EEPROM, and set up various
   2639 	 * bits in the control registers based on their contents.
   2640 	 */
   2641 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2642 	if (pn != NULL) {
   2643 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2644 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2645 	} else {
   2646 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2647 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2648 			goto out;
   2649 		}
   2650 	}
   2651 
   2652 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2653 	if (pn != NULL) {
   2654 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2655 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2656 	} else {
   2657 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2658 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2659 			goto out;
   2660 		}
   2661 	}
   2662 
   2663 	/* check for WM_F_WOL */
   2664 	switch (sc->sc_type) {
   2665 	case WM_T_82542_2_0:
   2666 	case WM_T_82542_2_1:
   2667 	case WM_T_82543:
   2668 		/* dummy? */
   2669 		eeprom_data = 0;
   2670 		apme_mask = NVM_CFG3_APME;
   2671 		break;
   2672 	case WM_T_82544:
   2673 		apme_mask = NVM_CFG2_82544_APM_EN;
   2674 		eeprom_data = cfg2;
   2675 		break;
   2676 	case WM_T_82546:
   2677 	case WM_T_82546_3:
   2678 	case WM_T_82571:
   2679 	case WM_T_82572:
   2680 	case WM_T_82573:
   2681 	case WM_T_82574:
   2682 	case WM_T_82583:
   2683 	case WM_T_80003:
   2684 	case WM_T_82575:
   2685 	case WM_T_82576:
   2686 		apme_mask = NVM_CFG3_APME;
   2687 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2688 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2689 		break;
   2690 	case WM_T_82580:
   2691 	case WM_T_I350:
   2692 	case WM_T_I354:
   2693 	case WM_T_I210:
   2694 	case WM_T_I211:
   2695 		apme_mask = NVM_CFG3_APME;
   2696 		wm_nvm_read(sc,
   2697 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2698 		    1, &eeprom_data);
   2699 		break;
   2700 	case WM_T_ICH8:
   2701 	case WM_T_ICH9:
   2702 	case WM_T_ICH10:
   2703 	case WM_T_PCH:
   2704 	case WM_T_PCH2:
   2705 	case WM_T_PCH_LPT:
   2706 	case WM_T_PCH_SPT:
   2707 	case WM_T_PCH_CNP:
    2708 		/* Already checked before wm_reset() */
   2709 		apme_mask = eeprom_data = 0;
   2710 		break;
   2711 	default: /* XXX 82540 */
   2712 		apme_mask = NVM_CFG3_APME;
   2713 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2714 		break;
   2715 	}
   2716 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2717 	if ((eeprom_data & apme_mask) != 0)
   2718 		sc->sc_flags |= WM_F_WOL;
   2719 
   2720 	/*
    2721 	 * We have the EEPROM settings; now apply the special cases
    2722 	 * where the EEPROM may be wrong or the board doesn't support
    2723 	 * wake on LAN on a particular port.
   2724 	 */
   2725 	switch (sc->sc_pcidevid) {
   2726 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2727 		sc->sc_flags &= ~WM_F_WOL;
   2728 		break;
   2729 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2730 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2731 		/* Wake events only supported on port A for dual fiber
   2732 		 * regardless of eeprom setting */
   2733 		if (sc->sc_funcid == 1)
   2734 			sc->sc_flags &= ~WM_F_WOL;
   2735 		break;
   2736 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2737 		/* If quad port adapter, disable WoL on all but port A */
   2738 		if (sc->sc_funcid != 0)
   2739 			sc->sc_flags &= ~WM_F_WOL;
   2740 		break;
   2741 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2742 		/* Wake events only supported on port A for dual fiber
   2743 		 * regardless of eeprom setting */
   2744 		if (sc->sc_funcid == 1)
   2745 			sc->sc_flags &= ~WM_F_WOL;
   2746 		break;
   2747 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2748 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2749 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2750 		/* If quad port adapter, disable WoL on all but port A */
   2751 		if (sc->sc_funcid != 0)
   2752 			sc->sc_flags &= ~WM_F_WOL;
   2753 		break;
   2754 	}
   2755 
   2756 	if (sc->sc_type >= WM_T_82575) {
   2757 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2758 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2759 			    nvmword);
   2760 			if ((sc->sc_type == WM_T_82575) ||
   2761 			    (sc->sc_type == WM_T_82576)) {
   2762 				/* Check NVM for autonegotiation */
   2763 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2764 				    != 0)
   2765 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2766 			}
   2767 			if ((sc->sc_type == WM_T_82575) ||
   2768 			    (sc->sc_type == WM_T_I350)) {
   2769 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2770 					sc->sc_flags |= WM_F_MAS;
   2771 			}
   2772 		}
   2773 	}
   2774 
   2775 	/*
    2776 	 * XXX need special handling for some multiple-port cards
    2777 	 * to disable a particular port.
   2778 	 */
   2779 
   2780 	if (sc->sc_type >= WM_T_82544) {
   2781 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2782 		if (pn != NULL) {
   2783 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2784 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2785 		} else {
   2786 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2787 				aprint_error_dev(sc->sc_dev,
   2788 				    "unable to read SWDPIN\n");
   2789 				goto out;
   2790 			}
   2791 		}
   2792 	}
   2793 
   2794 	if (cfg1 & NVM_CFG1_ILOS)
   2795 		sc->sc_ctrl |= CTRL_ILOS;
   2796 
   2797 	/*
   2798 	 * XXX
    2799 	 * This code isn't correct because pins 2 and 3 are located
    2800 	 * in different positions on newer chips. Check all datasheets.
    2801 	 *
    2802 	 * Until this is resolved, apply it only to chips up to the 82580.
   2803 	 */
   2804 	if (sc->sc_type <= WM_T_82580) {
   2805 		if (sc->sc_type >= WM_T_82544) {
   2806 			sc->sc_ctrl |=
   2807 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2808 			    CTRL_SWDPIO_SHIFT;
   2809 			sc->sc_ctrl |=
   2810 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2811 			    CTRL_SWDPINS_SHIFT;
   2812 		} else {
   2813 			sc->sc_ctrl |=
   2814 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2815 			    CTRL_SWDPIO_SHIFT;
   2816 		}
   2817 	}
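	/*
	 * Illustration of the extraction above (field value hypothetical):
	 * each shift pulls a 4-bit field out of the NVM word and re-places
	 * it in CTRL; e.g. if the SWDPIO field of swdpin is 0x5, then
	 * (0x5 & 0xf) << CTRL_SWDPIO_SHIFT sets the matching CTRL bits.
	 */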
   2818 
   2819 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2820 		wm_nvm_read(sc,
   2821 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2822 		    1, &nvmword);
   2823 		if (nvmword & NVM_CFG3_ILOS)
   2824 			sc->sc_ctrl |= CTRL_ILOS;
   2825 	}
   2826 
   2827 #if 0
   2828 	if (sc->sc_type >= WM_T_82544) {
   2829 		if (cfg1 & NVM_CFG1_IPS0)
   2830 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2831 		if (cfg1 & NVM_CFG1_IPS1)
   2832 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2833 		sc->sc_ctrl_ext |=
   2834 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2835 		    CTRL_EXT_SWDPIO_SHIFT;
   2836 		sc->sc_ctrl_ext |=
   2837 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2838 		    CTRL_EXT_SWDPINS_SHIFT;
   2839 	} else {
   2840 		sc->sc_ctrl_ext |=
   2841 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2842 		    CTRL_EXT_SWDPIO_SHIFT;
   2843 	}
   2844 #endif
   2845 
   2846 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2847 #if 0
   2848 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2849 #endif
   2850 
   2851 	if (sc->sc_type == WM_T_PCH) {
   2852 		uint16_t val;
   2853 
   2854 		/* Save the NVM K1 bit setting */
   2855 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2856 
   2857 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2858 			sc->sc_nvm_k1_enabled = 1;
   2859 		else
   2860 			sc->sc_nvm_k1_enabled = 0;
   2861 	}
   2862 
   2863 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2864 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2865 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2866 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2867 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2868 	    || sc->sc_type == WM_T_82573
   2869 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2870 		/* Copper only */
   2871 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2872 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2873 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2874 	    || (sc->sc_type == WM_T_I211)) {
   2875 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2876 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2877 		switch (link_mode) {
   2878 		case CTRL_EXT_LINK_MODE_1000KX:
   2879 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2880 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2881 			break;
   2882 		case CTRL_EXT_LINK_MODE_SGMII:
   2883 			if (wm_sgmii_uses_mdio(sc)) {
   2884 				aprint_normal_dev(sc->sc_dev,
   2885 				    "SGMII(MDIO)\n");
   2886 				sc->sc_flags |= WM_F_SGMII;
   2887 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2888 				break;
   2889 			}
   2890 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2891 			/*FALLTHROUGH*/
   2892 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2893 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2894 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2895 				if (link_mode
   2896 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2897 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2898 					sc->sc_flags |= WM_F_SGMII;
   2899 					aprint_verbose_dev(sc->sc_dev,
   2900 					    "SGMII\n");
   2901 				} else {
   2902 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2903 					aprint_verbose_dev(sc->sc_dev,
   2904 					    "SERDES\n");
   2905 				}
   2906 				break;
   2907 			}
   2908 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2909 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2910 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2911 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2912 				sc->sc_flags |= WM_F_SGMII;
   2913 			}
   2914 			/* Do not change link mode for 100BaseFX */
   2915 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2916 				break;
   2917 
   2918 			/* Change current link mode setting */
   2919 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2920 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2921 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2922 			else
   2923 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2924 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2925 			break;
   2926 		case CTRL_EXT_LINK_MODE_GMII:
   2927 		default:
   2928 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2929 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2930 			break;
   2931 		}
   2932 
    2933 		/* Enable the I2C interface only in SGMII mode. */
   2934 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2935 			reg |= CTRL_EXT_I2C_ENA;
   2936 		else
   2937 			reg &= ~CTRL_EXT_I2C_ENA;
   2938 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2939 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2940 			if (!wm_sgmii_uses_mdio(sc))
   2941 				wm_gmii_setup_phytype(sc, 0, 0);
   2942 			wm_reset_mdicnfg_82580(sc);
   2943 		}
   2944 	} else if (sc->sc_type < WM_T_82543 ||
   2945 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2946 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2947 			aprint_error_dev(sc->sc_dev,
   2948 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2949 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2950 		}
   2951 	} else {
   2952 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2953 			aprint_error_dev(sc->sc_dev,
   2954 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2955 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2956 		}
   2957 	}
   2958 
   2959 	if (sc->sc_type >= WM_T_PCH2)
   2960 		sc->sc_flags |= WM_F_EEE;
   2961 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2962 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2963 		/* XXX: Need special handling for I354. (not yet) */
   2964 		if (sc->sc_type != WM_T_I354)
   2965 			sc->sc_flags |= WM_F_EEE;
   2966 	}
   2967 
   2968 	/*
   2969 	 * The I350 has a bug where it always strips the CRC whether
   2970 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   2971 	 */
   2972 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2973 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2974 		sc->sc_flags |= WM_F_CRC_STRIP;
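	/*
	 * ("Cope in rxeof" means the receive path must not subtract
	 * ETHER_CRC_LEN itself when WM_F_CRC_STRIP is set, since the
	 * hardware has already removed the FCS.)
	 */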
   2975 
   2976 	/* Set device properties (macflags) */
   2977 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2978 
   2979 	if (sc->sc_flags != 0) {
   2980 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2981 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2982 	}
   2983 
   2984 #ifdef WM_MPSAFE
   2985 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2986 #else
   2987 	sc->sc_core_lock = NULL;
   2988 #endif
   2989 
   2990 	/* Initialize the media structures accordingly. */
   2991 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2992 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2993 	else
   2994 		wm_tbi_mediainit(sc); /* All others */
   2995 
   2996 	ifp = &sc->sc_ethercom.ec_if;
   2997 	xname = device_xname(sc->sc_dev);
   2998 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2999 	ifp->if_softc = sc;
   3000 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3001 #ifdef WM_MPSAFE
   3002 	ifp->if_extflags = IFEF_MPSAFE;
   3003 #endif
   3004 	ifp->if_ioctl = wm_ioctl;
   3005 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3006 		ifp->if_start = wm_nq_start;
   3007 		/*
    3008 		 * When there is only one CPU and the controller can use
    3009 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3010 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3011 		 * the other for link status changes.
   3012 		 * In this situation, wm_nq_transmit() is disadvantageous
   3013 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3014 		 */
   3015 		if (wm_is_using_multiqueue(sc))
   3016 			ifp->if_transmit = wm_nq_transmit;
   3017 	} else {
   3018 		ifp->if_start = wm_start;
   3019 		/*
    3020 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   3021 		 */
   3022 		if (wm_is_using_multiqueue(sc))
   3023 			ifp->if_transmit = wm_transmit;
   3024 	}
    3025 	/* wm(4) does not use ifp->if_watchdog; wm_tick() serves as the watchdog. */
   3026 	ifp->if_init = wm_init;
   3027 	ifp->if_stop = wm_stop;
   3028 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3029 	IFQ_SET_READY(&ifp->if_snd);
   3030 
    3031 	/* Check for jumbo frame support */
   3032 	switch (sc->sc_type) {
   3033 	case WM_T_82573:
   3034 		/* XXX limited to 9234 if ASPM is disabled */
   3035 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3036 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3037 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3038 		break;
   3039 	case WM_T_82571:
   3040 	case WM_T_82572:
   3041 	case WM_T_82574:
   3042 	case WM_T_82583:
   3043 	case WM_T_82575:
   3044 	case WM_T_82576:
   3045 	case WM_T_82580:
   3046 	case WM_T_I350:
   3047 	case WM_T_I354:
   3048 	case WM_T_I210:
   3049 	case WM_T_I211:
   3050 	case WM_T_80003:
   3051 	case WM_T_ICH9:
   3052 	case WM_T_ICH10:
   3053 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3054 	case WM_T_PCH_LPT:
   3055 	case WM_T_PCH_SPT:
   3056 	case WM_T_PCH_CNP:
   3057 		/* XXX limited to 9234 */
   3058 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3059 		break;
   3060 	case WM_T_PCH:
   3061 		/* XXX limited to 4096 */
   3062 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3063 		break;
   3064 	case WM_T_82542_2_0:
   3065 	case WM_T_82542_2_1:
   3066 	case WM_T_ICH8:
    3067 		/* No support for jumbo frames */
   3068 		break;
   3069 	default:
   3070 		/* ETHER_MAX_LEN_JUMBO */
   3071 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3072 		break;
   3073 	}
   3074 
    3075 	/* If we're an i82543 or greater, we can support VLANs. */
   3076 	if (sc->sc_type >= WM_T_82543) {
   3077 		sc->sc_ethercom.ec_capabilities |=
   3078 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3079 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3080 	}
   3081 
   3082 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3083 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3084 
   3085 	/*
    3086 	 * We can offload IPv4/TCPv4/UDPv4 checksums (Rx and Tx), plus
    3087 	 * TCPv6/UDPv6 Tx checksums.  Only on i82543 and later.
   3088 	 */
   3089 	if (sc->sc_type >= WM_T_82543) {
   3090 		ifp->if_capabilities |=
   3091 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3092 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3093 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3094 		    IFCAP_CSUM_TCPv6_Tx |
   3095 		    IFCAP_CSUM_UDPv6_Tx;
   3096 	}
   3097 
   3098 	/*
   3099 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   3100 	 *
   3101 	 *	82541GI (8086:1076) ... no
   3102 	 *	82572EI (8086:10b9) ... yes
   3103 	 */
   3104 	if (sc->sc_type >= WM_T_82571) {
   3105 		ifp->if_capabilities |=
   3106 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3107 	}
   3108 
   3109 	/*
    3110 	 * If we're an i82544 or greater (except i82547), we can do
   3111 	 * TCP segmentation offload.
   3112 	 */
   3113 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3114 		ifp->if_capabilities |= IFCAP_TSOv4;
   3115 	}
   3116 
   3117 	if (sc->sc_type >= WM_T_82571) {
   3118 		ifp->if_capabilities |= IFCAP_TSOv6;
   3119 	}
   3120 
   3121 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3122 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3123 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3124 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3125 
   3126 	/* Attach the interface. */
   3127 	if_initialize(ifp);
   3128 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3129 	ether_ifattach(ifp, enaddr);
   3130 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3131 	if_register(ifp);
   3132 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3133 	    RND_FLAG_DEFAULT);
   3134 
   3135 #ifdef WM_EVENT_COUNTERS
   3136 	/* Attach event counters. */
   3137 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3138 	    NULL, xname, "linkintr");
   3139 
   3140 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3141 	    NULL, xname, "tx_xoff");
   3142 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3143 	    NULL, xname, "tx_xon");
   3144 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3145 	    NULL, xname, "rx_xoff");
   3146 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3147 	    NULL, xname, "rx_xon");
   3148 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3149 	    NULL, xname, "rx_macctl");
   3150 #endif /* WM_EVENT_COUNTERS */
   3151 
   3152 	sc->sc_txrx_use_workqueue = false;
   3153 
   3154 	if (wm_phy_need_linkdown_discard(sc)) {
   3155 		DPRINTF(sc, WM_DEBUG_LINK,
   3156 		    ("%s: %s: Set linkdown discard flag\n",
   3157 			device_xname(sc->sc_dev), __func__));
   3158 		wm_set_linkdown_discard(sc);
   3159 	}
   3160 
   3161 	wm_init_sysctls(sc);
   3162 
   3163 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3164 		pmf_class_network_register(self, ifp);
   3165 	else
   3166 		aprint_error_dev(self, "couldn't establish power handler\n");
   3167 
   3168 	sc->sc_flags |= WM_F_ATTACHED;
   3169 out:
   3170 	return;
   3171 }
   3172 
   3173 /* The detach function (ca_detach) */
   3174 static int
   3175 wm_detach(device_t self, int flags __unused)
   3176 {
   3177 	struct wm_softc *sc = device_private(self);
   3178 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3179 	int i;
   3180 
   3181 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3182 		return 0;
   3183 
    3184 	/* Stop the interface; callouts are stopped inside wm_stop(). */
   3185 	wm_stop(ifp, 1);
   3186 
   3187 	pmf_device_deregister(self);
   3188 
   3189 	sysctl_teardown(&sc->sc_sysctllog);
   3190 
   3191 #ifdef WM_EVENT_COUNTERS
   3192 	evcnt_detach(&sc->sc_ev_linkintr);
   3193 
   3194 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3195 	evcnt_detach(&sc->sc_ev_tx_xon);
   3196 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3197 	evcnt_detach(&sc->sc_ev_rx_xon);
   3198 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3199 #endif /* WM_EVENT_COUNTERS */
   3200 
   3201 	rnd_detach_source(&sc->rnd_source);
   3202 
   3203 	/* Tell the firmware about the release */
   3204 	WM_CORE_LOCK(sc);
   3205 	wm_release_manageability(sc);
   3206 	wm_release_hw_control(sc);
   3207 	wm_enable_wakeup(sc);
   3208 	WM_CORE_UNLOCK(sc);
   3209 
   3210 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3211 
   3212 	ether_ifdetach(ifp);
   3213 	if_detach(ifp);
   3214 	if_percpuq_destroy(sc->sc_ipq);
   3215 
   3216 	/* Delete all remaining media. */
   3217 	ifmedia_fini(&sc->sc_mii.mii_media);
   3218 
   3219 	/* Unload RX dmamaps and free mbufs */
   3220 	for (i = 0; i < sc->sc_nqueues; i++) {
   3221 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3222 		mutex_enter(rxq->rxq_lock);
   3223 		wm_rxdrain(rxq);
   3224 		mutex_exit(rxq->rxq_lock);
   3225 	}
   3226 	/* Must unlock here */
   3227 
   3228 	/* Disestablish the interrupt handler */
   3229 	for (i = 0; i < sc->sc_nintrs; i++) {
   3230 		if (sc->sc_ihs[i] != NULL) {
   3231 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3232 			sc->sc_ihs[i] = NULL;
   3233 		}
   3234 	}
   3235 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3236 
    3237 	/* wm_stop() ensures the workqueue is stopped. */
   3238 	workqueue_destroy(sc->sc_queue_wq);
   3239 
   3240 	for (i = 0; i < sc->sc_nqueues; i++)
   3241 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3242 
   3243 	wm_free_txrx_queues(sc);
   3244 
   3245 	/* Unmap the registers */
   3246 	if (sc->sc_ss) {
   3247 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3248 		sc->sc_ss = 0;
   3249 	}
   3250 	if (sc->sc_ios) {
   3251 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3252 		sc->sc_ios = 0;
   3253 	}
   3254 	if (sc->sc_flashs) {
   3255 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3256 		sc->sc_flashs = 0;
   3257 	}
   3258 
   3259 	if (sc->sc_core_lock)
   3260 		mutex_obj_free(sc->sc_core_lock);
   3261 	if (sc->sc_ich_phymtx)
   3262 		mutex_obj_free(sc->sc_ich_phymtx);
   3263 	if (sc->sc_ich_nvmmtx)
   3264 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3265 
   3266 	return 0;
   3267 }
   3268 
   3269 static bool
   3270 wm_suspend(device_t self, const pmf_qual_t *qual)
   3271 {
   3272 	struct wm_softc *sc = device_private(self);
   3273 
   3274 	wm_release_manageability(sc);
   3275 	wm_release_hw_control(sc);
   3276 	wm_enable_wakeup(sc);
   3277 
   3278 	return true;
   3279 }
   3280 
   3281 static bool
   3282 wm_resume(device_t self, const pmf_qual_t *qual)
   3283 {
   3284 	struct wm_softc *sc = device_private(self);
   3285 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3286 	pcireg_t reg;
   3287 	char buf[256];
   3288 
   3289 	reg = CSR_READ(sc, WMREG_WUS);
   3290 	if (reg != 0) {
   3291 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3292 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3293 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3294 	}
   3295 
   3296 	if (sc->sc_type >= WM_T_PCH2)
   3297 		wm_resume_workarounds_pchlan(sc);
   3298 	if ((ifp->if_flags & IFF_UP) == 0) {
   3299 		/* >= PCH_SPT hardware workaround before reset. */
   3300 		if (sc->sc_type >= WM_T_PCH_SPT)
   3301 			wm_flush_desc_rings(sc);
   3302 
   3303 		wm_reset(sc);
   3304 		/* Non-AMT based hardware can now take control from firmware */
   3305 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3306 			wm_get_hw_control(sc);
   3307 		wm_init_manageability(sc);
   3308 	} else {
   3309 		/*
   3310 		 * We called pmf_class_network_register(), so if_init() is
   3311 		 * automatically called when IFF_UP. wm_reset(),
   3312 		 * wm_get_hw_control() and wm_init_manageability() are called
   3313 		 * via wm_init().
   3314 		 */
   3315 	}
   3316 
   3317 	return true;
   3318 }
   3319 
   3320 /*
   3321  * wm_watchdog:		[ifnet interface function]
   3322  *
   3323  *	Watchdog timer handler.
   3324  */
   3325 static void
   3326 wm_watchdog(struct ifnet *ifp)
   3327 {
   3328 	int qid;
   3329 	struct wm_softc *sc = ifp->if_softc;
    3330 	uint16_t hang_queue = 0; /* 16 bits suffice: wm(4)'s max is the 82576's 16 queues. */
   3331 
   3332 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3333 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3334 
   3335 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3336 	}
   3337 
    3338 	/* If any queue hung up, reset the interface. */
   3339 	if (hang_queue != 0) {
   3340 		(void)wm_init(ifp);
   3341 
   3342 		/*
    3343 		 * Some upper-layer processing still calls ifp->if_start(),
    3344 		 * e.g. ALTQ or a single-CPU system.
   3345 		 */
   3346 		/* Try to get more packets going. */
   3347 		ifp->if_start(ifp);
   3348 	}
   3349 }
   3350 
   3351 
   3352 static void
   3353 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3354 {
   3355 
   3356 	mutex_enter(txq->txq_lock);
   3357 	if (txq->txq_sending &&
   3358 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3359 		wm_watchdog_txq_locked(ifp, txq, hang);
   3360 
   3361 	mutex_exit(txq->txq_lock);
   3362 }
   3363 
   3364 static void
   3365 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3366     uint16_t *hang)
   3367 {
   3368 	struct wm_softc *sc = ifp->if_softc;
   3369 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3370 
   3371 	KASSERT(mutex_owned(txq->txq_lock));
   3372 
   3373 	/*
   3374 	 * Since we're using delayed interrupts, sweep up
   3375 	 * before we report an error.
   3376 	 */
   3377 	wm_txeof(txq, UINT_MAX);
   3378 
   3379 	if (txq->txq_sending)
   3380 		*hang |= __BIT(wmq->wmq_id);
   3381 
   3382 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3383 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3384 		    device_xname(sc->sc_dev));
   3385 	} else {
   3386 #ifdef WM_DEBUG
   3387 		int i, j;
   3388 		struct wm_txsoft *txs;
   3389 #endif
   3390 		log(LOG_ERR,
   3391 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3392 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3393 		    txq->txq_next);
   3394 		if_statinc(ifp, if_oerrors);
   3395 #ifdef WM_DEBUG
   3396 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3397 		    i = WM_NEXTTXS(txq, i)) {
   3398 			txs = &txq->txq_soft[i];
   3399 			printf("txs %d tx %d -> %d\n",
   3400 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3401 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3402 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3403 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3404 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3405 					printf("\t %#08x%08x\n",
   3406 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3407 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3408 				} else {
   3409 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3410 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3411 					    txq->txq_descs[j].wtx_addr.wa_low);
   3412 					printf("\t %#04x%02x%02x%08x\n",
   3413 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3414 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3415 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3416 					    txq->txq_descs[j].wtx_cmdlen);
   3417 				}
   3418 				if (j == txs->txs_lastdesc)
   3419 					break;
   3420 			}
   3421 		}
   3422 #endif
   3423 	}
   3424 }
   3425 
   3426 /*
   3427  * wm_tick:
   3428  *
   3429  *	One second timer, used to check link status, sweep up
   3430  *	completed transmit jobs, etc.
   3431  */
   3432 static void
   3433 wm_tick(void *arg)
   3434 {
   3435 	struct wm_softc *sc = arg;
   3436 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3437 #ifndef WM_MPSAFE
   3438 	int s = splnet();
   3439 #endif
   3440 
   3441 	WM_CORE_LOCK(sc);
   3442 
   3443 	if (sc->sc_core_stopping) {
   3444 		WM_CORE_UNLOCK(sc);
   3445 #ifndef WM_MPSAFE
   3446 		splx(s);
   3447 #endif
   3448 		return;
   3449 	}
   3450 
   3451 	if (sc->sc_type >= WM_T_82542_2_1) {
   3452 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3453 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3454 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3455 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3456 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3457 	}
   3458 
   3459 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3460 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3461 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3462 	    + CSR_READ(sc, WMREG_CRCERRS)
   3463 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3464 	    + CSR_READ(sc, WMREG_SYMERRC)
   3465 	    + CSR_READ(sc, WMREG_RXERRC)
   3466 	    + CSR_READ(sc, WMREG_SEC)
   3467 	    + CSR_READ(sc, WMREG_CEXTERR)
   3468 	    + CSR_READ(sc, WMREG_RLEC));
   3469 	/*
    3470 	 * WMREG_RNBC is incremented when there are no available buffers in
    3471 	 * host memory. It is not a count of dropped packets: the Ethernet
    3472 	 * controller can still receive packets in that case as long as
    3473 	 * there is space in the PHY's FIFO.
    3474 	 *
    3475 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT (see the
    3476 	 * sketch below) instead of if_iqdrops.
   3477 	 */
   3478 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3479 	IF_STAT_PUTREF(ifp);
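#if 0
	/*
	 * Sketch only: a dedicated event counter for RNBC, as suggested
	 * above.  sc_ev_rnbc is hypothetical and not a member of struct
	 * wm_softc; it would be attached in wm_attach() like the other
	 * counters and updated here.
	 */
	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
#endif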
   3480 
   3481 	if (sc->sc_flags & WM_F_HAS_MII)
   3482 		mii_tick(&sc->sc_mii);
   3483 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3484 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3485 		wm_serdes_tick(sc);
   3486 	else
   3487 		wm_tbi_tick(sc);
   3488 
   3489 	WM_CORE_UNLOCK(sc);
   3490 
   3491 	wm_watchdog(ifp);
   3492 
   3493 	callout_schedule(&sc->sc_tick_ch, hz);
   3494 }
   3495 
   3496 static int
   3497 wm_ifflags_cb(struct ethercom *ec)
   3498 {
   3499 	struct ifnet *ifp = &ec->ec_if;
   3500 	struct wm_softc *sc = ifp->if_softc;
   3501 	u_short iffchange;
   3502 	int ecchange;
   3503 	bool needreset = false;
   3504 	int rc = 0;
   3505 
   3506 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3507 		device_xname(sc->sc_dev), __func__));
   3508 
   3509 	WM_CORE_LOCK(sc);
   3510 
   3511 	/*
   3512 	 * Check for if_flags.
   3513 	 * Main usage is to prevent linkdown when opening bpf.
   3514 	 */
   3515 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3516 	sc->sc_if_flags = ifp->if_flags;
   3517 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3518 		needreset = true;
   3519 		goto ec;
   3520 	}
   3521 
   3522 	/* iff related updates */
   3523 	if ((iffchange & IFF_PROMISC) != 0)
   3524 		wm_set_filter(sc);
   3525 
   3526 	wm_set_vlan(sc);
   3527 
   3528 ec:
   3529 	/* Check for ec_capenable. */
   3530 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3531 	sc->sc_ec_capenable = ec->ec_capenable;
   3532 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3533 		needreset = true;
   3534 		goto out;
   3535 	}
   3536 
   3537 	/* ec related updates */
   3538 	wm_set_eee(sc);
   3539 
   3540 out:
   3541 	if (needreset)
   3542 		rc = ENETRESET;
   3543 	WM_CORE_UNLOCK(sc);
   3544 
   3545 	return rc;
   3546 }
   3547 
   3548 static bool
   3549 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3550 {
   3551 
   3552 	switch (sc->sc_phytype) {
   3553 	case WMPHY_82577: /* ihphy */
   3554 	case WMPHY_82578: /* atphy */
   3555 	case WMPHY_82579: /* ihphy */
   3556 	case WMPHY_I217: /* ihphy */
   3557 	case WMPHY_82580: /* ihphy */
   3558 	case WMPHY_I350: /* ihphy */
   3559 		return true;
   3560 	default:
   3561 		return false;
   3562 	}
   3563 }
   3564 
   3565 static void
   3566 wm_set_linkdown_discard(struct wm_softc *sc)
   3567 {
   3568 
   3569 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3570 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3571 
   3572 		mutex_enter(txq->txq_lock);
   3573 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3574 		mutex_exit(txq->txq_lock);
   3575 	}
   3576 }
   3577 
   3578 static void
   3579 wm_clear_linkdown_discard(struct wm_softc *sc)
   3580 {
   3581 
   3582 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3583 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3584 
   3585 		mutex_enter(txq->txq_lock);
   3586 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3587 		mutex_exit(txq->txq_lock);
   3588 	}
   3589 }
   3590 
   3591 /*
   3592  * wm_ioctl:		[ifnet interface function]
   3593  *
   3594  *	Handle control requests from the operator.
   3595  */
   3596 static int
   3597 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3598 {
   3599 	struct wm_softc *sc = ifp->if_softc;
   3600 	struct ifreq *ifr = (struct ifreq *)data;
   3601 	struct ifaddr *ifa = (struct ifaddr *)data;
   3602 	struct sockaddr_dl *sdl;
   3603 	int s, error;
   3604 
   3605 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3606 		device_xname(sc->sc_dev), __func__));
   3607 
   3608 #ifndef WM_MPSAFE
   3609 	s = splnet();
   3610 #endif
   3611 	switch (cmd) {
   3612 	case SIOCSIFMEDIA:
   3613 		WM_CORE_LOCK(sc);
   3614 		/* Flow control requires full-duplex mode. */
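		/*
		 * E.g., from userland (a sketch; exact media names depend on
		 * the PHY): "ifconfig wm0 media 1000baseT mediaopt
		 * full-duplex,flowcontrol".
		 */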
   3615 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3616 		    (ifr->ifr_media & IFM_FDX) == 0)
   3617 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3618 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3619 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3620 				/* We can do both TXPAUSE and RXPAUSE. */
   3621 				ifr->ifr_media |=
   3622 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3623 			}
   3624 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3625 		}
   3626 		WM_CORE_UNLOCK(sc);
   3627 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3628 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3629 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   3630 				DPRINTF(sc, WM_DEBUG_LINK,
   3631 				    ("%s: %s: Set linkdown discard flag\n",
   3632 					device_xname(sc->sc_dev), __func__));
   3633 				wm_set_linkdown_discard(sc);
   3634 			}
   3635 		}
   3636 		break;
   3637 	case SIOCINITIFADDR:
   3638 		WM_CORE_LOCK(sc);
   3639 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3640 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3641 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3642 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3643 			/* Unicast address is the first multicast entry */
   3644 			wm_set_filter(sc);
   3645 			error = 0;
   3646 			WM_CORE_UNLOCK(sc);
   3647 			break;
   3648 		}
   3649 		WM_CORE_UNLOCK(sc);
   3650 		/*FALLTHROUGH*/
   3651 	default:
   3652 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   3653 			if (((ifp->if_flags & IFF_UP) != 0) &&
   3654 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   3655 				DPRINTF(sc, WM_DEBUG_LINK,
   3656 				    ("%s: %s: Set linkdown discard flag\n",
   3657 					device_xname(sc->sc_dev), __func__));
   3658 				wm_set_linkdown_discard(sc);
   3659 			}
   3660 		}
   3661 #ifdef WM_MPSAFE
   3662 		s = splnet();
   3663 #endif
   3664 		/* It may call wm_start, so unlock here */
   3665 		error = ether_ioctl(ifp, cmd, data);
   3666 #ifdef WM_MPSAFE
   3667 		splx(s);
   3668 #endif
   3669 		if (error != ENETRESET)
   3670 			break;
   3671 
   3672 		error = 0;
   3673 
   3674 		if (cmd == SIOCSIFCAP)
   3675 			error = (*ifp->if_init)(ifp);
   3676 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3677 			;
   3678 		else if (ifp->if_flags & IFF_RUNNING) {
   3679 			/*
   3680 			 * Multicast list has changed; set the hardware filter
   3681 			 * accordingly.
   3682 			 */
   3683 			WM_CORE_LOCK(sc);
   3684 			wm_set_filter(sc);
   3685 			WM_CORE_UNLOCK(sc);
   3686 		}
   3687 		break;
   3688 	}
   3689 
   3690 #ifndef WM_MPSAFE
   3691 	splx(s);
   3692 #endif
   3693 	return error;
   3694 }
   3695 
   3696 /* MAC address related */
   3697 
   3698 /*
    3699  * Get the offset of the MAC address and return it.
    3700  * If an error occurs, use offset 0.
   3701  */
   3702 static uint16_t
   3703 wm_check_alt_mac_addr(struct wm_softc *sc)
   3704 {
   3705 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3706 	uint16_t offset = NVM_OFF_MACADDR;
   3707 
   3708 	/* Try to read alternative MAC address pointer */
   3709 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3710 		return 0;
   3711 
    3712 	/* Check whether the pointer is valid. */
   3713 	if ((offset == 0x0000) || (offset == 0xffff))
   3714 		return 0;
   3715 
   3716 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3717 	/*
    3718 	 * Check whether the alternative MAC address is valid.
    3719 	 * Some cards have a non-0xffff pointer but don't actually use
    3720 	 * an alternative MAC address.
    3721 	 *
    3722 	 * Check that the multicast bit (bit 0 of the first octet) is clear.
   3723 	 */
   3724 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3725 		if (((myea[0] & 0xff) & 0x01) == 0)
   3726 			return offset; /* Found */
   3727 
   3728 	/* Not found */
   3729 	return 0;
   3730 }
   3731 
   3732 static int
   3733 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3734 {
   3735 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3736 	uint16_t offset = NVM_OFF_MACADDR;
   3737 	int do_invert = 0;
   3738 
   3739 	switch (sc->sc_type) {
   3740 	case WM_T_82580:
   3741 	case WM_T_I350:
   3742 	case WM_T_I354:
   3743 		/* EEPROM Top Level Partitioning */
   3744 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3745 		break;
   3746 	case WM_T_82571:
   3747 	case WM_T_82575:
   3748 	case WM_T_82576:
   3749 	case WM_T_80003:
   3750 	case WM_T_I210:
   3751 	case WM_T_I211:
   3752 		offset = wm_check_alt_mac_addr(sc);
   3753 		if (offset == 0)
   3754 			if ((sc->sc_funcid & 0x01) == 1)
   3755 				do_invert = 1;
   3756 		break;
   3757 	default:
   3758 		if ((sc->sc_funcid & 0x01) == 1)
   3759 			do_invert = 1;
   3760 		break;
   3761 	}
   3762 
   3763 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3764 		goto bad;
   3765 
   3766 	enaddr[0] = myea[0] & 0xff;
   3767 	enaddr[1] = myea[0] >> 8;
   3768 	enaddr[2] = myea[1] & 0xff;
   3769 	enaddr[3] = myea[1] >> 8;
   3770 	enaddr[4] = myea[2] & 0xff;
   3771 	enaddr[5] = myea[2] >> 8;
   3772 
   3773 	/*
   3774 	 * Toggle the LSB of the MAC address on the second port
   3775 	 * of some dual port cards.
   3776 	 */
   3777 	if (do_invert != 0)
   3778 		enaddr[5] ^= 1;
   3779 
   3780 	return 0;
   3781 
   3782  bad:
   3783 	return -1;
   3784 }
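/*
 * Example of the LSB toggle above (address purely illustrative): if the
 * NVM holds 00:1b:21:aa:bb:c0 and this is function 1 of a dual-port
 * card with no alternative MAC address, this port uses
 * 00:1b:21:aa:bb:c1.
 */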
   3785 
   3786 /*
   3787  * wm_set_ral:
   3788  *
    3789  *	Set an entry in the receive address list.
   3790  */
   3791 static void
   3792 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3793 {
   3794 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3795 	uint32_t wlock_mac;
   3796 	int rv;
   3797 
   3798 	if (enaddr != NULL) {
   3799 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3800 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3801 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3802 		ral_hi |= RAL_AV;
   3803 	} else {
   3804 		ral_lo = 0;
   3805 		ral_hi = 0;
   3806 	}
   3807 
   3808 	switch (sc->sc_type) {
   3809 	case WM_T_82542_2_0:
   3810 	case WM_T_82542_2_1:
   3811 	case WM_T_82543:
   3812 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3813 		CSR_WRITE_FLUSH(sc);
   3814 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3815 		CSR_WRITE_FLUSH(sc);
   3816 		break;
   3817 	case WM_T_PCH2:
   3818 	case WM_T_PCH_LPT:
   3819 	case WM_T_PCH_SPT:
   3820 	case WM_T_PCH_CNP:
   3821 		if (idx == 0) {
   3822 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3823 			CSR_WRITE_FLUSH(sc);
   3824 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3825 			CSR_WRITE_FLUSH(sc);
   3826 			return;
   3827 		}
   3828 		if (sc->sc_type != WM_T_PCH2) {
   3829 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3830 			    FWSM_WLOCK_MAC);
   3831 			addrl = WMREG_SHRAL(idx - 1);
   3832 			addrh = WMREG_SHRAH(idx - 1);
   3833 		} else {
   3834 			wlock_mac = 0;
   3835 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3836 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3837 		}
   3838 
   3839 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3840 			rv = wm_get_swflag_ich8lan(sc);
   3841 			if (rv != 0)
   3842 				return;
   3843 			CSR_WRITE(sc, addrl, ral_lo);
   3844 			CSR_WRITE_FLUSH(sc);
   3845 			CSR_WRITE(sc, addrh, ral_hi);
   3846 			CSR_WRITE_FLUSH(sc);
   3847 			wm_put_swflag_ich8lan(sc);
   3848 		}
   3849 
   3850 		break;
   3851 	default:
   3852 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3853 		CSR_WRITE_FLUSH(sc);
   3854 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3855 		CSR_WRITE_FLUSH(sc);
   3856 		break;
   3857 	}
   3858 }
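/*
 * Packing example (address purely illustrative): for 00:1b:21:aa:bb:cc,
 * ral_lo = 0xaa211b00 and ral_hi = RAL_AV | 0xccbb; the address is laid
 * out little-endian across the RAL/RAH pair with the Address Valid bit
 * set in RAH.
 */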
   3859 
   3860 /*
   3861  * wm_mchash:
   3862  *
   3863  *	Compute the hash of the multicast address for the 4096-bit
   3864  *	multicast filter.
   3865  */
   3866 static uint32_t
   3867 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3868 {
   3869 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3870 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3871 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3872 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3873 	uint32_t hash;
   3874 
   3875 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3876 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3877 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3878 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3879 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3880 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3881 		return (hash & 0x3ff);
   3882 	}
   3883 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3884 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3885 
   3886 	return (hash & 0xfff);
   3887 }
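/*
 * Worked example (assuming sc_mchash_type == 0 on a non-ICH/PCH chip):
 * for an address whose last two octets are 0x12:0x34,
 * hash = (0x12 >> 4) | (0x34 << 4) = 0x341.  wm_set_filter() then uses
 * reg = (hash >> 5) & 0x7f = 0x1a and bit = hash & 0x1f = 0x01 to set
 * one bit in the 4096-bit MTA.
 */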
   3888 
   3889 /*
    3890  * wm_rar_count:
    3891  *	Return the number of entries in the receive address list.
   3892  */
   3893 static int
   3894 wm_rar_count(struct wm_softc *sc)
   3895 {
   3896 	int size;
   3897 
   3898 	switch (sc->sc_type) {
   3899 	case WM_T_ICH8:
    3900 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3901 		break;
   3902 	case WM_T_ICH9:
   3903 	case WM_T_ICH10:
   3904 	case WM_T_PCH:
   3905 		size = WM_RAL_TABSIZE_ICH8;
   3906 		break;
   3907 	case WM_T_PCH2:
   3908 		size = WM_RAL_TABSIZE_PCH2;
   3909 		break;
   3910 	case WM_T_PCH_LPT:
   3911 	case WM_T_PCH_SPT:
   3912 	case WM_T_PCH_CNP:
   3913 		size = WM_RAL_TABSIZE_PCH_LPT;
   3914 		break;
   3915 	case WM_T_82575:
   3916 	case WM_T_I210:
   3917 	case WM_T_I211:
   3918 		size = WM_RAL_TABSIZE_82575;
   3919 		break;
   3920 	case WM_T_82576:
   3921 	case WM_T_82580:
   3922 		size = WM_RAL_TABSIZE_82576;
   3923 		break;
   3924 	case WM_T_I350:
   3925 	case WM_T_I354:
   3926 		size = WM_RAL_TABSIZE_I350;
   3927 		break;
   3928 	default:
   3929 		size = WM_RAL_TABSIZE;
   3930 	}
   3931 
   3932 	return size;
   3933 }
   3934 
   3935 /*
   3936  * wm_set_filter:
   3937  *
   3938  *	Set up the receive filter.
   3939  */
   3940 static void
   3941 wm_set_filter(struct wm_softc *sc)
   3942 {
   3943 	struct ethercom *ec = &sc->sc_ethercom;
   3944 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3945 	struct ether_multi *enm;
   3946 	struct ether_multistep step;
   3947 	bus_addr_t mta_reg;
   3948 	uint32_t hash, reg, bit;
   3949 	int i, size, ralmax, rv;
   3950 
   3951 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3952 		device_xname(sc->sc_dev), __func__));
   3953 
   3954 	if (sc->sc_type >= WM_T_82544)
   3955 		mta_reg = WMREG_CORDOVA_MTA;
   3956 	else
   3957 		mta_reg = WMREG_MTA;
   3958 
   3959 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3960 
   3961 	if (ifp->if_flags & IFF_BROADCAST)
   3962 		sc->sc_rctl |= RCTL_BAM;
   3963 	if (ifp->if_flags & IFF_PROMISC) {
   3964 		sc->sc_rctl |= RCTL_UPE;
   3965 		ETHER_LOCK(ec);
   3966 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3967 		ETHER_UNLOCK(ec);
   3968 		goto allmulti;
   3969 	}
   3970 
   3971 	/*
   3972 	 * Set the station address in the first RAL slot, and
   3973 	 * clear the remaining slots.
   3974 	 */
   3975 	size = wm_rar_count(sc);
   3976 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3977 
   3978 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3979 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3980 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3981 		switch (i) {
   3982 		case 0:
   3983 			/* We can use all entries */
   3984 			ralmax = size;
   3985 			break;
   3986 		case 1:
   3987 			/* Only RAR[0] */
   3988 			ralmax = 1;
   3989 			break;
   3990 		default:
   3991 			/* Available SHRA + RAR[0] */
   3992 			ralmax = i + 1;
   3993 		}
   3994 	} else
   3995 		ralmax = size;
   3996 	for (i = 1; i < size; i++) {
   3997 		if (i < ralmax)
   3998 			wm_set_ral(sc, NULL, i);
   3999 	}
   4000 
   4001 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4002 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4003 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4004 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4005 		size = WM_ICH8_MC_TABSIZE;
   4006 	else
   4007 		size = WM_MC_TABSIZE;
   4008 	/* Clear out the multicast table. */
   4009 	for (i = 0; i < size; i++) {
   4010 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4011 		CSR_WRITE_FLUSH(sc);
   4012 	}
   4013 
   4014 	ETHER_LOCK(ec);
   4015 	ETHER_FIRST_MULTI(step, ec, enm);
   4016 	while (enm != NULL) {
   4017 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4018 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4019 			ETHER_UNLOCK(ec);
   4020 			/*
   4021 			 * We must listen to a range of multicast addresses.
   4022 			 * For now, just accept all multicasts, rather than
   4023 			 * trying to set only those filter bits needed to match
   4024 			 * the range.  (At this time, the only use of address
   4025 			 * ranges is for IP multicast routing, for which the
   4026 			 * range is big enough to require all bits set.)
   4027 			 */
   4028 			goto allmulti;
   4029 		}
   4030 
   4031 		hash = wm_mchash(sc, enm->enm_addrlo);
   4032 
   4033 		reg = (hash >> 5);
   4034 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4035 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4036 		    || (sc->sc_type == WM_T_PCH2)
   4037 		    || (sc->sc_type == WM_T_PCH_LPT)
   4038 		    || (sc->sc_type == WM_T_PCH_SPT)
   4039 		    || (sc->sc_type == WM_T_PCH_CNP))
   4040 			reg &= 0x1f;
   4041 		else
   4042 			reg &= 0x7f;
   4043 		bit = hash & 0x1f;
   4044 
   4045 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4046 		hash |= 1U << bit;
   4047 
   4048 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4049 			/*
    4050 			 * 82544 Errata 9: Certain registers cannot be written
   4051 			 * with particular alignments in PCI-X bus operation
   4052 			 * (FCAH, MTA and VFTA).
   4053 			 */
   4054 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4055 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4056 			CSR_WRITE_FLUSH(sc);
   4057 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4058 			CSR_WRITE_FLUSH(sc);
   4059 		} else {
   4060 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4061 			CSR_WRITE_FLUSH(sc);
   4062 		}
   4063 
   4064 		ETHER_NEXT_MULTI(step, enm);
   4065 	}
   4066 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4067 	ETHER_UNLOCK(ec);
   4068 
   4069 	goto setit;
   4070 
   4071  allmulti:
   4072 	sc->sc_rctl |= RCTL_MPE;
   4073 
   4074  setit:
   4075 	if (sc->sc_type >= WM_T_PCH2) {
   4076 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4077 		    && (ifp->if_mtu > ETHERMTU))
   4078 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4079 		else
   4080 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4081 		if (rv != 0)
   4082 			device_printf(sc->sc_dev,
   4083 			    "Failed to do workaround for jumbo frame.\n");
   4084 	}
   4085 
   4086 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4087 }
   4088 
   4089 /* Reset and init related */
   4090 
   4091 static void
   4092 wm_set_vlan(struct wm_softc *sc)
   4093 {
   4094 
   4095 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4096 		device_xname(sc->sc_dev), __func__));
   4097 
   4098 	/* Deal with VLAN enables. */
   4099 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4100 		sc->sc_ctrl |= CTRL_VME;
   4101 	else
   4102 		sc->sc_ctrl &= ~CTRL_VME;
   4103 
   4104 	/* Write the control registers. */
   4105 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4106 }
   4107 
   4108 static void
   4109 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4110 {
   4111 	uint32_t gcr;
   4112 	pcireg_t ctrl2;
   4113 
   4114 	gcr = CSR_READ(sc, WMREG_GCR);
   4115 
   4116 	/* Only take action if timeout value is defaulted to 0 */
   4117 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4118 		goto out;
   4119 
   4120 	if ((gcr & GCR_CAP_VER2) == 0) {
   4121 		gcr |= GCR_CMPL_TMOUT_10MS;
   4122 		goto out;
   4123 	}
   4124 
   4125 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4126 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4127 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4128 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4129 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4130 
   4131 out:
   4132 	/* Disable completion timeout resend */
   4133 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4134 
   4135 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4136 }
   4137 
   4138 void
   4139 wm_get_auto_rd_done(struct wm_softc *sc)
   4140 {
   4141 	int i;
   4142 
   4143 	/* wait for eeprom to reload */
   4144 	switch (sc->sc_type) {
   4145 	case WM_T_82571:
   4146 	case WM_T_82572:
   4147 	case WM_T_82573:
   4148 	case WM_T_82574:
   4149 	case WM_T_82583:
   4150 	case WM_T_82575:
   4151 	case WM_T_82576:
   4152 	case WM_T_82580:
   4153 	case WM_T_I350:
   4154 	case WM_T_I354:
   4155 	case WM_T_I210:
   4156 	case WM_T_I211:
   4157 	case WM_T_80003:
   4158 	case WM_T_ICH8:
   4159 	case WM_T_ICH9:
   4160 		for (i = 0; i < 10; i++) {
   4161 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4162 				break;
   4163 			delay(1000);
   4164 		}
   4165 		if (i == 10) {
   4166 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4167 			    "complete\n", device_xname(sc->sc_dev));
   4168 		}
   4169 		break;
   4170 	default:
   4171 		break;
   4172 	}
   4173 }
   4174 
   4175 void
   4176 wm_lan_init_done(struct wm_softc *sc)
   4177 {
   4178 	uint32_t reg = 0;
   4179 	int i;
   4180 
   4181 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4182 		device_xname(sc->sc_dev), __func__));
   4183 
   4184 	/* Wait for eeprom to reload */
   4185 	switch (sc->sc_type) {
   4186 	case WM_T_ICH10:
   4187 	case WM_T_PCH:
   4188 	case WM_T_PCH2:
   4189 	case WM_T_PCH_LPT:
   4190 	case WM_T_PCH_SPT:
   4191 	case WM_T_PCH_CNP:
   4192 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4193 			reg = CSR_READ(sc, WMREG_STATUS);
   4194 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4195 				break;
   4196 			delay(100);
   4197 		}
   4198 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4199 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4200 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4201 		}
   4202 		break;
   4203 	default:
   4204 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4205 		    __func__);
   4206 		break;
   4207 	}
   4208 
   4209 	reg &= ~STATUS_LAN_INIT_DONE;
   4210 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4211 }
   4212 
   4213 void
   4214 wm_get_cfg_done(struct wm_softc *sc)
   4215 {
   4216 	int mask;
   4217 	uint32_t reg;
   4218 	int i;
   4219 
   4220 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4221 		device_xname(sc->sc_dev), __func__));
   4222 
   4223 	/* Wait for eeprom to reload */
   4224 	switch (sc->sc_type) {
   4225 	case WM_T_82542_2_0:
   4226 	case WM_T_82542_2_1:
   4227 		/* null */
   4228 		break;
   4229 	case WM_T_82543:
   4230 	case WM_T_82544:
   4231 	case WM_T_82540:
   4232 	case WM_T_82545:
   4233 	case WM_T_82545_3:
   4234 	case WM_T_82546:
   4235 	case WM_T_82546_3:
   4236 	case WM_T_82541:
   4237 	case WM_T_82541_2:
   4238 	case WM_T_82547:
   4239 	case WM_T_82547_2:
   4240 	case WM_T_82573:
   4241 	case WM_T_82574:
   4242 	case WM_T_82583:
   4243 		/* generic */
   4244 		delay(10*1000);
   4245 		break;
   4246 	case WM_T_80003:
   4247 	case WM_T_82571:
   4248 	case WM_T_82572:
   4249 	case WM_T_82575:
   4250 	case WM_T_82576:
   4251 	case WM_T_82580:
   4252 	case WM_T_I350:
   4253 	case WM_T_I354:
   4254 	case WM_T_I210:
   4255 	case WM_T_I211:
   4256 		if (sc->sc_type == WM_T_82571) {
    4257 			/* On the 82571, all functions share CFGDONE_0 */
   4258 			mask = EEMNGCTL_CFGDONE_0;
   4259 		} else
   4260 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4261 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4262 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4263 				break;
   4264 			delay(1000);
   4265 		}
   4266 		if (i >= WM_PHY_CFG_TIMEOUT)
   4267 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4268 				device_xname(sc->sc_dev), __func__));
   4269 		break;
   4270 	case WM_T_ICH8:
   4271 	case WM_T_ICH9:
   4272 	case WM_T_ICH10:
   4273 	case WM_T_PCH:
   4274 	case WM_T_PCH2:
   4275 	case WM_T_PCH_LPT:
   4276 	case WM_T_PCH_SPT:
   4277 	case WM_T_PCH_CNP:
   4278 		delay(10*1000);
   4279 		if (sc->sc_type >= WM_T_ICH10)
   4280 			wm_lan_init_done(sc);
   4281 		else
   4282 			wm_get_auto_rd_done(sc);
   4283 
   4284 		/* Clear PHY Reset Asserted bit */
   4285 		reg = CSR_READ(sc, WMREG_STATUS);
   4286 		if ((reg & STATUS_PHYRA) != 0)
   4287 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4288 		break;
   4289 	default:
   4290 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4291 		    __func__);
   4292 		break;
   4293 	}
   4294 }
   4295 
   4296 int
   4297 wm_phy_post_reset(struct wm_softc *sc)
   4298 {
   4299 	device_t dev = sc->sc_dev;
   4300 	uint16_t reg;
   4301 	int rv = 0;
   4302 
   4303 	/* This function is only for ICH8 and newer. */
   4304 	if (sc->sc_type < WM_T_ICH8)
   4305 		return 0;
   4306 
   4307 	if (wm_phy_resetisblocked(sc)) {
   4308 		/* XXX */
   4309 		device_printf(dev, "PHY is blocked\n");
   4310 		return -1;
   4311 	}
   4312 
   4313 	/* Allow time for h/w to get to quiescent state after reset */
   4314 	delay(10*1000);
   4315 
   4316 	/* Perform any necessary post-reset workarounds */
   4317 	if (sc->sc_type == WM_T_PCH)
   4318 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4319 	else if (sc->sc_type == WM_T_PCH2)
   4320 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4321 	if (rv != 0)
   4322 		return rv;
   4323 
   4324 	/* Clear the host wakeup bit after lcd reset */
   4325 	if (sc->sc_type >= WM_T_PCH) {
   4326 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4327 		reg &= ~BM_WUC_HOST_WU_BIT;
   4328 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4329 	}
   4330 
   4331 	/* Configure the LCD with the extended configuration region in NVM */
   4332 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4333 		return rv;
   4334 
   4335 	/* Configure the LCD with the OEM bits in NVM */
   4336 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4337 
   4338 	if (sc->sc_type == WM_T_PCH2) {
   4339 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4340 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4341 			delay(10 * 1000);
   4342 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4343 		}
   4344 		/* Set EEE LPI Update Timer to 200usec */
   4345 		rv = sc->phy.acquire(sc);
   4346 		if (rv)
   4347 			return rv;
   4348 		rv = wm_write_emi_reg_locked(dev,
   4349 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4350 		sc->phy.release(sc);
   4351 	}
   4352 
   4353 	return rv;
   4354 }
   4355 
   4356 /* Only for PCH and newer */
   4357 static int
   4358 wm_write_smbus_addr(struct wm_softc *sc)
   4359 {
   4360 	uint32_t strap, freq;
   4361 	uint16_t phy_data;
   4362 	int rv;
   4363 
   4364 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4365 		device_xname(sc->sc_dev), __func__));
   4366 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4367 
   4368 	strap = CSR_READ(sc, WMREG_STRAP);
   4369 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4370 
   4371 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4372 	if (rv != 0)
   4373 		return -1;
   4374 
   4375 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4376 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4377 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4378 
   4379 	if (sc->sc_phytype == WMPHY_I217) {
   4380 		/* Restore SMBus frequency */
    4381 		if (freq--) {
   4382 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4383 			    | HV_SMB_ADDR_FREQ_HIGH);
   4384 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4385 			    HV_SMB_ADDR_FREQ_LOW);
   4386 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4387 			    HV_SMB_ADDR_FREQ_HIGH);
   4388 		} else
   4389 			DPRINTF(sc, WM_DEBUG_INIT,
   4390 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4391 				device_xname(sc->sc_dev), __func__));
   4392 	}
   4393 
   4394 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4395 	    phy_data);
   4396 }
   4397 
   4398 static int
   4399 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4400 {
   4401 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4402 	uint16_t phy_page = 0;
   4403 	int rv = 0;
   4404 
   4405 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4406 		device_xname(sc->sc_dev), __func__));
   4407 
   4408 	switch (sc->sc_type) {
   4409 	case WM_T_ICH8:
   4410 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4411 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4412 			return 0;
   4413 
   4414 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4415 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4416 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4417 			break;
   4418 		}
   4419 		/* FALLTHROUGH */
   4420 	case WM_T_PCH:
   4421 	case WM_T_PCH2:
   4422 	case WM_T_PCH_LPT:
   4423 	case WM_T_PCH_SPT:
   4424 	case WM_T_PCH_CNP:
   4425 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4426 		break;
   4427 	default:
   4428 		return 0;
   4429 	}
   4430 
   4431 	if ((rv = sc->phy.acquire(sc)) != 0)
   4432 		return rv;
   4433 
   4434 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4435 	if ((reg & sw_cfg_mask) == 0)
   4436 		goto release;
   4437 
   4438 	/*
   4439 	 * Make sure HW does not configure LCD from PHY extended configuration
   4440 	 * before SW configuration
   4441 	 */
   4442 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4443 	if ((sc->sc_type < WM_T_PCH2)
   4444 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4445 		goto release;
   4446 
   4447 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4448 		device_xname(sc->sc_dev), __func__));
    4449 	/* The ext config pointer is in DWORDs; convert to NVM words. */
   4450 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4451 
   4452 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4453 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4454 	if (cnf_size == 0)
   4455 		goto release;
   4456 
   4457 	if (((sc->sc_type == WM_T_PCH)
   4458 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4459 	    || (sc->sc_type > WM_T_PCH)) {
   4460 		/*
   4461 		 * HW configures the SMBus address and LEDs when the OEM and
   4462 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4463 		 * are cleared, SW will configure them instead.
   4464 		 */
   4465 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4466 			device_xname(sc->sc_dev), __func__));
   4467 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4468 			goto release;
   4469 
   4470 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4471 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4472 		    (uint16_t)reg);
   4473 		if (rv != 0)
   4474 			goto release;
   4475 	}
   4476 
    4477 	/*
         	 * Configure the LCD from the extended configuration region.
         	 * Each entry is a pair of NVM words: the register data followed
         	 * by the register address, hence the (word_addr + i * 2)
         	 * indexing below.
         	 */
   4478 	for (i = 0; i < cnf_size; i++) {
   4479 		uint16_t reg_data, reg_addr;
   4480 
   4481 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4482 			goto release;
   4483 
    4484 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1,
         		    &reg_addr) != 0)
   4485 			goto release;
   4486 
   4487 		if (reg_addr == IGPHY_PAGE_SELECT)
   4488 			phy_page = reg_data;
   4489 
   4490 		reg_addr &= IGPHY_MAXREGADDR;
   4491 		reg_addr |= phy_page;
   4492 
   4493 		KASSERT(sc->phy.writereg_locked != NULL);
   4494 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4495 		    reg_data);
   4496 	}
   4497 
   4498 release:
   4499 	sc->phy.release(sc);
   4500 	return rv;
   4501 }
   4502 
   4503 /*
   4504  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4505  *  @sc:       pointer to the HW structure
    4506  *  @d0_state: true if entering the D0 device state, false for D3
   4507  *
    4508  *  SW will configure GbE Disable and LPLU based on the NVM. The four bits
    4509  *  are collectively called OEM bits.  The OEM Write Enable bit and the SW
    4510  *  Config bit in the NVM determine whether HW should configure LPLU and
         	 *  GbE Disable.
   4511  */
   4512 int
   4513 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4514 {
   4515 	uint32_t mac_reg;
   4516 	uint16_t oem_reg;
   4517 	int rv;
   4518 
   4519 	if (sc->sc_type < WM_T_PCH)
   4520 		return 0;
   4521 
   4522 	rv = sc->phy.acquire(sc);
   4523 	if (rv != 0)
   4524 		return rv;
   4525 
   4526 	if (sc->sc_type == WM_T_PCH) {
   4527 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4528 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4529 			goto release;
   4530 	}
   4531 
   4532 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4533 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4534 		goto release;
   4535 
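         	/*
         	 * Mirror the GbE Disable and LPLU settings from the MAC's
         	 * PHY_CTRL register into the PHY's OEM bits; which PHY_CTRL
         	 * bits apply depends on whether we are entering D0 or D3.
         	 */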
   4536 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4537 
   4538 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4539 	if (rv != 0)
   4540 		goto release;
   4541 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4542 
   4543 	if (d0_state) {
   4544 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4545 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4546 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4547 			oem_reg |= HV_OEM_BITS_LPLU;
   4548 	} else {
   4549 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4550 		    != 0)
   4551 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4552 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4553 		    != 0)
   4554 			oem_reg |= HV_OEM_BITS_LPLU;
   4555 	}
   4556 
   4557 	/* Set Restart auto-neg to activate the bits */
   4558 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4559 	    && (wm_phy_resetisblocked(sc) == false))
   4560 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4561 
   4562 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4563 
   4564 release:
   4565 	sc->phy.release(sc);
   4566 
   4567 	return rv;
   4568 }
   4569 
   4570 /* Init hardware bits */
   4571 void
   4572 wm_initialize_hardware_bits(struct wm_softc *sc)
   4573 {
   4574 	uint32_t tarc0, tarc1, reg;
   4575 
   4576 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4577 		device_xname(sc->sc_dev), __func__));
   4578 
   4579 	/* For 82571 variant, 80003 and ICHs */
   4580 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4581 	    || (sc->sc_type >= WM_T_80003)) {
   4582 
   4583 		/* Transmit Descriptor Control 0 */
   4584 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4585 		reg |= TXDCTL_COUNT_DESC;
   4586 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4587 
   4588 		/* Transmit Descriptor Control 1 */
   4589 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4590 		reg |= TXDCTL_COUNT_DESC;
   4591 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4592 
   4593 		/* TARC0 */
   4594 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4595 		switch (sc->sc_type) {
   4596 		case WM_T_82571:
   4597 		case WM_T_82572:
   4598 		case WM_T_82573:
   4599 		case WM_T_82574:
   4600 		case WM_T_82583:
   4601 		case WM_T_80003:
   4602 			/* Clear bits 30..27 */
   4603 			tarc0 &= ~__BITS(30, 27);
   4604 			break;
   4605 		default:
   4606 			break;
   4607 		}
   4608 
   4609 		switch (sc->sc_type) {
   4610 		case WM_T_82571:
   4611 		case WM_T_82572:
   4612 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4613 
   4614 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4615 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4616 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4617 			/* 8257[12] Errata No.7 */
    4618 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4619 
   4620 			/* TARC1 bit 28 */
   4621 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4622 				tarc1 &= ~__BIT(28);
   4623 			else
   4624 				tarc1 |= __BIT(28);
   4625 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4626 
   4627 			/*
   4628 			 * 8257[12] Errata No.13
    4629 			 * Disable Dynamic Clock Gating.
   4630 			 */
   4631 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4632 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4633 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4634 			break;
   4635 		case WM_T_82573:
   4636 		case WM_T_82574:
   4637 		case WM_T_82583:
   4638 			if ((sc->sc_type == WM_T_82574)
   4639 			    || (sc->sc_type == WM_T_82583))
   4640 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4641 
   4642 			/* Extended Device Control */
   4643 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4644 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4645 			reg |= __BIT(22);	/* Set bit 22 */
   4646 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4647 
   4648 			/* Device Control */
   4649 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4650 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4651 
   4652 			/* PCIe Control Register */
   4653 			/*
   4654 			 * 82573 Errata (unknown).
   4655 			 *
   4656 			 * 82574 Errata 25 and 82583 Errata 12
   4657 			 * "Dropped Rx Packets":
    4658 			 *   NVM image version 2.1.4 and newer do not have this bug.
   4659 			 */
   4660 			reg = CSR_READ(sc, WMREG_GCR);
   4661 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4662 			CSR_WRITE(sc, WMREG_GCR, reg);
   4663 
   4664 			if ((sc->sc_type == WM_T_82574)
   4665 			    || (sc->sc_type == WM_T_82583)) {
   4666 				/*
   4667 				 * Document says this bit must be set for
   4668 				 * proper operation.
   4669 				 */
   4670 				reg = CSR_READ(sc, WMREG_GCR);
   4671 				reg |= __BIT(22);
   4672 				CSR_WRITE(sc, WMREG_GCR, reg);
   4673 
   4674 				/*
    4675 				 * Apply a workaround for the hardware
    4676 				 * errata documented in the errata docs.
    4677 				 * It fixes an issue where unreliable PCIe
    4678 				 * completions occur, particularly with
    4679 				 * ASPM enabled. Without the fix, the issue
    4680 				 * can cause Tx timeouts.
   4681 				 */
   4682 				reg = CSR_READ(sc, WMREG_GCR2);
   4683 				reg |= __BIT(0);
   4684 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4685 			}
   4686 			break;
   4687 		case WM_T_80003:
   4688 			/* TARC0 */
   4689 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4690 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4691 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4692 
   4693 			/* TARC1 bit 28 */
   4694 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4695 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4696 				tarc1 &= ~__BIT(28);
   4697 			else
   4698 				tarc1 |= __BIT(28);
   4699 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4700 			break;
   4701 		case WM_T_ICH8:
   4702 		case WM_T_ICH9:
   4703 		case WM_T_ICH10:
   4704 		case WM_T_PCH:
   4705 		case WM_T_PCH2:
   4706 		case WM_T_PCH_LPT:
   4707 		case WM_T_PCH_SPT:
   4708 		case WM_T_PCH_CNP:
   4709 			/* TARC0 */
   4710 			if (sc->sc_type == WM_T_ICH8) {
   4711 				/* Set TARC0 bits 29 and 28 */
   4712 				tarc0 |= __BITS(29, 28);
   4713 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4714 				tarc0 |= __BIT(29);
   4715 				/*
    4716 				 * Clear bit 28. From Linux.
   4717 				 * See I218/I219 spec update
   4718 				 * "5. Buffer Overrun While the I219 is
   4719 				 * Processing DMA Transactions"
   4720 				 */
   4721 				tarc0 &= ~__BIT(28);
   4722 			}
   4723 			/* Set TARC0 bits 23,24,26,27 */
   4724 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4725 
   4726 			/* CTRL_EXT */
   4727 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4728 			reg |= __BIT(22);	/* Set bit 22 */
   4729 			/*
   4730 			 * Enable PHY low-power state when MAC is at D3
   4731 			 * w/o WoL
   4732 			 */
   4733 			if (sc->sc_type >= WM_T_PCH)
   4734 				reg |= CTRL_EXT_PHYPDEN;
   4735 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4736 
   4737 			/* TARC1 */
   4738 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4739 			/* bit 28 */
   4740 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4741 				tarc1 &= ~__BIT(28);
   4742 			else
   4743 				tarc1 |= __BIT(28);
   4744 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4745 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4746 
   4747 			/* Device Status */
   4748 			if (sc->sc_type == WM_T_ICH8) {
   4749 				reg = CSR_READ(sc, WMREG_STATUS);
   4750 				reg &= ~__BIT(31);
   4751 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4752 
   4753 			}
   4754 
   4755 			/* IOSFPC */
   4756 			if (sc->sc_type == WM_T_PCH_SPT) {
   4757 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4758 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4759 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4760 			}
   4761 			/*
    4762 			 * To work around a descriptor data corruption issue
    4763 			 * during NFS v2 UDP traffic, just disable the NFS
    4764 			 * filtering capability.
   4765 			 */
   4766 			reg = CSR_READ(sc, WMREG_RFCTL);
   4767 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4768 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4769 			break;
   4770 		default:
   4771 			break;
   4772 		}
   4773 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4774 
   4775 		switch (sc->sc_type) {
   4776 		/*
   4777 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4778 		 * Avoid RSS Hash Value bug.
   4779 		 */
   4780 		case WM_T_82571:
   4781 		case WM_T_82572:
   4782 		case WM_T_82573:
   4783 		case WM_T_80003:
   4784 		case WM_T_ICH8:
   4785 			reg = CSR_READ(sc, WMREG_RFCTL);
    4786 			reg |= WMREG_RFCTL_NEWIPV6EXDIS
         			    | WMREG_RFCTL_IPV6EXDIS;
   4787 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4788 			break;
   4789 		case WM_T_82574:
    4790 			/* Use extended Rx descriptors. */
   4791 			reg = CSR_READ(sc, WMREG_RFCTL);
   4792 			reg |= WMREG_RFCTL_EXSTEN;
   4793 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4794 			break;
   4795 		default:
   4796 			break;
   4797 		}
   4798 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4799 		/*
   4800 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4801 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4802 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4803 		 * Correctly by the Device"
   4804 		 *
   4805 		 * I354(C2000) Errata AVR53:
   4806 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4807 		 * Hang"
   4808 		 */
   4809 		reg = CSR_READ(sc, WMREG_RFCTL);
   4810 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4811 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4812 	}
   4813 }
   4814 
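         /*
          * Map the encoded RXPBS packet buffer size field through the
          * 82580-specific lookup table; values beyond the table map to 0.
          */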
   4815 static uint32_t
   4816 wm_rxpbs_adjust_82580(uint32_t val)
   4817 {
   4818 	uint32_t rv = 0;
   4819 
   4820 	if (val < __arraycount(wm_82580_rxpbs_table))
   4821 		rv = wm_82580_rxpbs_table[val];
   4822 
   4823 	return rv;
   4824 }
   4825 
   4826 /*
   4827  * wm_reset_phy:
   4828  *
    4829  *	Generic PHY reset function.
   4830  *	Same as e1000_phy_hw_reset_generic()
   4831  */
   4832 static int
   4833 wm_reset_phy(struct wm_softc *sc)
   4834 {
    4835 	uint32_t reg;
         	int rv;
   4836 
   4837 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4838 		device_xname(sc->sc_dev), __func__));
   4839 	if (wm_phy_resetisblocked(sc))
   4840 		return -1;
   4841 
    4842 	if ((rv = sc->phy.acquire(sc)) != 0)
         		return rv;
   4843 
   4844 	reg = CSR_READ(sc, WMREG_CTRL);
   4845 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4846 	CSR_WRITE_FLUSH(sc);
   4847 
   4848 	delay(sc->phy.reset_delay_us);
   4849 
   4850 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4851 	CSR_WRITE_FLUSH(sc);
   4852 
   4853 	delay(150);
   4854 
   4855 	sc->phy.release(sc);
   4856 
   4857 	wm_get_cfg_done(sc);
   4858 	wm_phy_post_reset(sc);
   4859 
   4860 	return 0;
   4861 }
   4862 
   4863 /*
   4864  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   4865  *
    4866  * On the I219, the descriptor rings must be emptied before resetting the HW
   4867  * or before changing the device state to D3 during runtime (runtime PM).
   4868  *
   4869  * Failure to do this will cause the HW to enter a unit hang state which can
   4870  * only be released by PCI reset on the device.
   4871  *
   4872  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   4873  */
   4874 static void
   4875 wm_flush_desc_rings(struct wm_softc *sc)
   4876 {
   4877 	pcireg_t preg;
   4878 	uint32_t reg;
   4879 	struct wm_txqueue *txq;
   4880 	wiseman_txdesc_t *txd;
   4881 	int nexttx;
   4882 	uint32_t rctl;
   4883 
   4884 	/* First, disable MULR fix in FEXTNVM11 */
   4885 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4886 	reg |= FEXTNVM11_DIS_MULRFIX;
   4887 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4888 
   4889 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4890 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4891 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4892 		return;
   4893 
   4894 	/*
   4895 	 * Remove all descriptors from the tx_ring.
   4896 	 *
    4897 	 * We want to clear all pending descriptors from the TX ring. Zeroing
    4898 	 * happens when the HW reads the regs. We assign the ring itself as
    4899 	 * the data of the next descriptor. We don't care about the data, as
    4900 	 * we are about to reset the HW.
   4901 	 */
   4902 #ifdef WM_DEBUG
   4903 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   4904 #endif
   4905 	reg = CSR_READ(sc, WMREG_TCTL);
   4906 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4907 
   4908 	txq = &sc->sc_queue[0].wmq_txq;
   4909 	nexttx = txq->txq_next;
   4910 	txd = &txq->txq_descs[nexttx];
   4911 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   4912 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4913 	txd->wtx_fields.wtxu_status = 0;
   4914 	txd->wtx_fields.wtxu_options = 0;
   4915 	txd->wtx_fields.wtxu_vlan = 0;
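         
         	/*
         	 * The dummy descriptor above points at the ring memory itself
         	 * and claims a 512 byte payload; bumping TDT below makes the
         	 * HW fetch and "transmit" it, which satisfies the pending
         	 * flush request.
         	 */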
   4916 
   4917 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4918 	    BUS_SPACE_BARRIER_WRITE);
   4919 
   4920 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4921 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4922 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4923 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4924 	delay(250);
   4925 
   4926 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4927 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4928 		return;
   4929 
   4930 	/*
   4931 	 * Mark all descriptors in the RX ring as consumed and disable the
   4932 	 * rx ring.
   4933 	 */
   4934 #ifdef WM_DEBUG
   4935 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4936 #endif
   4937 	rctl = CSR_READ(sc, WMREG_RCTL);
   4938 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4939 	CSR_WRITE_FLUSH(sc);
   4940 	delay(150);
   4941 
   4942 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4943 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4944 	reg &= 0xffffc000;
   4945 	/*
   4946 	 * Update thresholds: prefetch threshold to 31, host threshold
   4947 	 * to 1 and make sure the granularity is "descriptors" and not
   4948 	 * "cache lines"
   4949 	 */
   4950 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4951 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4952 
   4953 	/* Momentarily enable the RX ring for the changes to take effect */
   4954 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4955 	CSR_WRITE_FLUSH(sc);
   4956 	delay(150);
   4957 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4958 }
   4959 
   4960 /*
   4961  * wm_reset:
   4962  *
   4963  *	Reset the i82542 chip.
   4964  */
   4965 static void
   4966 wm_reset(struct wm_softc *sc)
   4967 {
   4968 	int phy_reset = 0;
   4969 	int i, error = 0;
   4970 	uint32_t reg;
   4971 	uint16_t kmreg;
   4972 	int rv;
   4973 
   4974 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4975 		device_xname(sc->sc_dev), __func__));
   4976 	KASSERT(sc->sc_type != 0);
   4977 
   4978 	/*
   4979 	 * Allocate on-chip memory according to the MTU size.
   4980 	 * The Packet Buffer Allocation register must be written
   4981 	 * before the chip is reset.
   4982 	 */
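         	/*
         	 * The PBA value is the RX packet buffer size (in KB units);
         	 * the remainder of the on-chip buffer is left for TX. For
         	 * example, the 82547 case below gives the TX FIFO
         	 * (PBA_40K - sc_pba) KB of its 40 KB total.
         	 */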
   4983 	switch (sc->sc_type) {
   4984 	case WM_T_82547:
   4985 	case WM_T_82547_2:
   4986 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4987 		    PBA_22K : PBA_30K;
   4988 		for (i = 0; i < sc->sc_nqueues; i++) {
   4989 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4990 			txq->txq_fifo_head = 0;
   4991 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4992 			txq->txq_fifo_size =
   4993 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4994 			txq->txq_fifo_stall = 0;
   4995 		}
   4996 		break;
   4997 	case WM_T_82571:
   4998 	case WM_T_82572:
    4999 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   5000 	case WM_T_80003:
   5001 		sc->sc_pba = PBA_32K;
   5002 		break;
   5003 	case WM_T_82573:
   5004 		sc->sc_pba = PBA_12K;
   5005 		break;
   5006 	case WM_T_82574:
   5007 	case WM_T_82583:
   5008 		sc->sc_pba = PBA_20K;
   5009 		break;
   5010 	case WM_T_82576:
   5011 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5012 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5013 		break;
   5014 	case WM_T_82580:
   5015 	case WM_T_I350:
   5016 	case WM_T_I354:
   5017 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5018 		break;
   5019 	case WM_T_I210:
   5020 	case WM_T_I211:
   5021 		sc->sc_pba = PBA_34K;
   5022 		break;
   5023 	case WM_T_ICH8:
   5024 		/* Workaround for a bit corruption issue in FIFO memory */
   5025 		sc->sc_pba = PBA_8K;
   5026 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5027 		break;
   5028 	case WM_T_ICH9:
   5029 	case WM_T_ICH10:
   5030 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5031 		    PBA_14K : PBA_10K;
   5032 		break;
   5033 	case WM_T_PCH:
   5034 	case WM_T_PCH2:	/* XXX 14K? */
   5035 	case WM_T_PCH_LPT:
   5036 	case WM_T_PCH_SPT:
   5037 	case WM_T_PCH_CNP:
   5038 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5039 		    PBA_12K : PBA_26K;
   5040 		break;
   5041 	default:
   5042 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5043 		    PBA_40K : PBA_48K;
   5044 		break;
   5045 	}
   5046 	/*
    5047 	 * Only old or non-multiqueue devices have the PBA register.
   5048 	 * XXX Need special handling for 82575.
   5049 	 */
   5050 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5051 	    || (sc->sc_type == WM_T_82575))
   5052 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5053 
   5054 	/* Prevent the PCI-E bus from sticking */
   5055 	if (sc->sc_flags & WM_F_PCIE) {
   5056 		int timeout = 800;
   5057 
   5058 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5059 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5060 
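         		/*
         		 * Poll up to 800 * 100us = 80ms for pending master
         		 * requests to drain.
         		 */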
   5061 		while (timeout--) {
   5062 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5063 			    == 0)
   5064 				break;
   5065 			delay(100);
   5066 		}
   5067 		if (timeout == 0)
   5068 			device_printf(sc->sc_dev,
   5069 			    "failed to disable busmastering\n");
   5070 	}
   5071 
   5072 	/* Set the completion timeout for interface */
   5073 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5074 	    || (sc->sc_type == WM_T_82580)
   5075 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5076 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5077 		wm_set_pcie_completion_timeout(sc);
   5078 
   5079 	/* Clear interrupt */
   5080 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5081 	if (wm_is_using_msix(sc)) {
   5082 		if (sc->sc_type != WM_T_82574) {
   5083 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5084 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5085 		} else
   5086 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5087 	}
   5088 
   5089 	/* Stop the transmit and receive processes. */
   5090 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5091 	sc->sc_rctl &= ~RCTL_EN;
   5092 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5093 	CSR_WRITE_FLUSH(sc);
   5094 
   5095 	/* XXX set_tbi_sbp_82543() */
   5096 
   5097 	delay(10*1000);
   5098 
   5099 	/* Must acquire the MDIO ownership before MAC reset */
   5100 	switch (sc->sc_type) {
   5101 	case WM_T_82573:
   5102 	case WM_T_82574:
   5103 	case WM_T_82583:
   5104 		error = wm_get_hw_semaphore_82573(sc);
   5105 		break;
   5106 	default:
   5107 		break;
   5108 	}
   5109 
   5110 	/*
   5111 	 * 82541 Errata 29? & 82547 Errata 28?
   5112 	 * See also the description about PHY_RST bit in CTRL register
   5113 	 * in 8254x_GBe_SDM.pdf.
   5114 	 */
   5115 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5116 		CSR_WRITE(sc, WMREG_CTRL,
   5117 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5118 		CSR_WRITE_FLUSH(sc);
   5119 		delay(5000);
   5120 	}
   5121 
   5122 	switch (sc->sc_type) {
   5123 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5124 	case WM_T_82541:
   5125 	case WM_T_82541_2:
   5126 	case WM_T_82547:
   5127 	case WM_T_82547_2:
   5128 		/*
   5129 		 * On some chipsets, a reset through a memory-mapped write
   5130 		 * cycle can cause the chip to reset before completing the
    5131 		 * write cycle. This causes major headaches that can be avoided
   5132 		 * by issuing the reset via indirect register writes through
   5133 		 * I/O space.
   5134 		 *
   5135 		 * So, if we successfully mapped the I/O BAR at attach time,
   5136 		 * use that. Otherwise, try our luck with a memory-mapped
   5137 		 * reset.
   5138 		 */
   5139 		if (sc->sc_flags & WM_F_IOH_VALID)
   5140 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5141 		else
   5142 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5143 		break;
   5144 	case WM_T_82545_3:
   5145 	case WM_T_82546_3:
   5146 		/* Use the shadow control register on these chips. */
   5147 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5148 		break;
   5149 	case WM_T_80003:
   5150 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5151 		sc->phy.acquire(sc);
   5152 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5153 		sc->phy.release(sc);
   5154 		break;
   5155 	case WM_T_ICH8:
   5156 	case WM_T_ICH9:
   5157 	case WM_T_ICH10:
   5158 	case WM_T_PCH:
   5159 	case WM_T_PCH2:
   5160 	case WM_T_PCH_LPT:
   5161 	case WM_T_PCH_SPT:
   5162 	case WM_T_PCH_CNP:
   5163 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5164 		if (wm_phy_resetisblocked(sc) == false) {
   5165 			/*
   5166 			 * Gate automatic PHY configuration by hardware on
   5167 			 * non-managed 82579
   5168 			 */
   5169 			if ((sc->sc_type == WM_T_PCH2)
   5170 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5171 				== 0))
   5172 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5173 
   5174 			reg |= CTRL_PHY_RESET;
   5175 			phy_reset = 1;
   5176 		} else
   5177 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5178 		sc->phy.acquire(sc);
   5179 		CSR_WRITE(sc, WMREG_CTRL, reg);
    5180 		/* Don't insert a completion barrier when resetting */
   5181 		delay(20*1000);
   5182 		mutex_exit(sc->sc_ich_phymtx);
   5183 		break;
   5184 	case WM_T_82580:
   5185 	case WM_T_I350:
   5186 	case WM_T_I354:
   5187 	case WM_T_I210:
   5188 	case WM_T_I211:
   5189 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5190 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5191 			CSR_WRITE_FLUSH(sc);
   5192 		delay(5000);
   5193 		break;
   5194 	case WM_T_82542_2_0:
   5195 	case WM_T_82542_2_1:
   5196 	case WM_T_82543:
   5197 	case WM_T_82540:
   5198 	case WM_T_82545:
   5199 	case WM_T_82546:
   5200 	case WM_T_82571:
   5201 	case WM_T_82572:
   5202 	case WM_T_82573:
   5203 	case WM_T_82574:
   5204 	case WM_T_82575:
   5205 	case WM_T_82576:
   5206 	case WM_T_82583:
   5207 	default:
   5208 		/* Everything else can safely use the documented method. */
   5209 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5210 		break;
   5211 	}
   5212 
   5213 	/* Must release the MDIO ownership after MAC reset */
   5214 	switch (sc->sc_type) {
   5215 	case WM_T_82573:
   5216 	case WM_T_82574:
   5217 	case WM_T_82583:
   5218 		if (error == 0)
   5219 			wm_put_hw_semaphore_82573(sc);
   5220 		break;
   5221 	default:
   5222 		break;
   5223 	}
   5224 
   5225 	/* Set Phy Config Counter to 50msec */
   5226 	if (sc->sc_type == WM_T_PCH2) {
   5227 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5228 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5229 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5230 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5231 	}
   5232 
   5233 	if (phy_reset != 0)
   5234 		wm_get_cfg_done(sc);
   5235 
   5236 	/* Reload EEPROM */
   5237 	switch (sc->sc_type) {
   5238 	case WM_T_82542_2_0:
   5239 	case WM_T_82542_2_1:
   5240 	case WM_T_82543:
   5241 	case WM_T_82544:
   5242 		delay(10);
   5243 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5244 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5245 		CSR_WRITE_FLUSH(sc);
   5246 		delay(2000);
   5247 		break;
   5248 	case WM_T_82540:
   5249 	case WM_T_82545:
   5250 	case WM_T_82545_3:
   5251 	case WM_T_82546:
   5252 	case WM_T_82546_3:
   5253 		delay(5*1000);
   5254 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5255 		break;
   5256 	case WM_T_82541:
   5257 	case WM_T_82541_2:
   5258 	case WM_T_82547:
   5259 	case WM_T_82547_2:
   5260 		delay(20000);
   5261 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5262 		break;
   5263 	case WM_T_82571:
   5264 	case WM_T_82572:
   5265 	case WM_T_82573:
   5266 	case WM_T_82574:
   5267 	case WM_T_82583:
   5268 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5269 			delay(10);
   5270 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5271 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5272 			CSR_WRITE_FLUSH(sc);
   5273 		}
   5274 		/* check EECD_EE_AUTORD */
   5275 		wm_get_auto_rd_done(sc);
   5276 		/*
   5277 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5278 		 * is set.
   5279 		 */
   5280 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5281 		    || (sc->sc_type == WM_T_82583))
   5282 			delay(25*1000);
   5283 		break;
   5284 	case WM_T_82575:
   5285 	case WM_T_82576:
   5286 	case WM_T_82580:
   5287 	case WM_T_I350:
   5288 	case WM_T_I354:
   5289 	case WM_T_I210:
   5290 	case WM_T_I211:
   5291 	case WM_T_80003:
   5292 		/* check EECD_EE_AUTORD */
   5293 		wm_get_auto_rd_done(sc);
   5294 		break;
   5295 	case WM_T_ICH8:
   5296 	case WM_T_ICH9:
   5297 	case WM_T_ICH10:
   5298 	case WM_T_PCH:
   5299 	case WM_T_PCH2:
   5300 	case WM_T_PCH_LPT:
   5301 	case WM_T_PCH_SPT:
   5302 	case WM_T_PCH_CNP:
   5303 		break;
   5304 	default:
   5305 		panic("%s: unknown type\n", __func__);
   5306 	}
   5307 
   5308 	/* Check whether EEPROM is present or not */
   5309 	switch (sc->sc_type) {
   5310 	case WM_T_82575:
   5311 	case WM_T_82576:
   5312 	case WM_T_82580:
   5313 	case WM_T_I350:
   5314 	case WM_T_I354:
   5315 	case WM_T_ICH8:
   5316 	case WM_T_ICH9:
   5317 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5318 			/* Not found */
   5319 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5320 			if (sc->sc_type == WM_T_82575)
   5321 				wm_reset_init_script_82575(sc);
   5322 		}
   5323 		break;
   5324 	default:
   5325 		break;
   5326 	}
   5327 
   5328 	if (phy_reset != 0)
   5329 		wm_phy_post_reset(sc);
   5330 
   5331 	if ((sc->sc_type == WM_T_82580)
   5332 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5333 		/* Clear global device reset status bit */
   5334 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5335 	}
   5336 
   5337 	/* Clear any pending interrupt events. */
   5338 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5339 	reg = CSR_READ(sc, WMREG_ICR);
   5340 	if (wm_is_using_msix(sc)) {
   5341 		if (sc->sc_type != WM_T_82574) {
   5342 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5343 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5344 		} else
   5345 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5346 	}
   5347 
   5348 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5349 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5350 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5351 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5352 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5353 		reg |= KABGTXD_BGSQLBIAS;
   5354 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5355 	}
   5356 
   5357 	/* Reload sc_ctrl */
   5358 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5359 
   5360 	wm_set_eee(sc);
   5361 
   5362 	/*
   5363 	 * For PCH, this write will make sure that any noise will be detected
   5364 	 * as a CRC error and be dropped rather than show up as a bad packet
   5365 	 * to the DMA engine
   5366 	 */
   5367 	if (sc->sc_type == WM_T_PCH)
   5368 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5369 
   5370 	if (sc->sc_type >= WM_T_82544)
   5371 		CSR_WRITE(sc, WMREG_WUC, 0);
   5372 
   5373 	if (sc->sc_type < WM_T_82575)
   5374 		wm_disable_aspm(sc); /* Workaround for some chips */
   5375 
   5376 	wm_reset_mdicnfg_82580(sc);
   5377 
   5378 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5379 		wm_pll_workaround_i210(sc);
   5380 
   5381 	if (sc->sc_type == WM_T_80003) {
   5382 		/* Default to TRUE to enable the MDIC W/A */
   5383 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5384 
   5385 		rv = wm_kmrn_readreg(sc,
   5386 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5387 		if (rv == 0) {
   5388 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5389 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5390 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5391 			else
   5392 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5393 		}
   5394 	}
   5395 }
   5396 
   5397 /*
   5398  * wm_add_rxbuf:
   5399  *
    5400  *	Add a receive buffer to the indicated descriptor.
   5401  */
   5402 static int
   5403 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5404 {
   5405 	struct wm_softc *sc = rxq->rxq_sc;
   5406 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5407 	struct mbuf *m;
   5408 	int error;
   5409 
   5410 	KASSERT(mutex_owned(rxq->rxq_lock));
   5411 
   5412 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5413 	if (m == NULL)
   5414 		return ENOBUFS;
   5415 
   5416 	MCLGET(m, M_DONTWAIT);
   5417 	if ((m->m_flags & M_EXT) == 0) {
   5418 		m_freem(m);
   5419 		return ENOBUFS;
   5420 	}
   5421 
   5422 	if (rxs->rxs_mbuf != NULL)
   5423 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5424 
   5425 	rxs->rxs_mbuf = m;
   5426 
   5427 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5428 	/*
   5429 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5430 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5431 	 */
   5432 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5433 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5434 	if (error) {
   5435 		/* XXX XXX XXX */
   5436 		aprint_error_dev(sc->sc_dev,
   5437 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5438 		panic("wm_add_rxbuf");
   5439 	}
   5440 
   5441 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5442 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5443 
   5444 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5445 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5446 			wm_init_rxdesc(rxq, idx);
   5447 	} else
   5448 		wm_init_rxdesc(rxq, idx);
   5449 
   5450 	return 0;
   5451 }
   5452 
   5453 /*
   5454  * wm_rxdrain:
   5455  *
   5456  *	Drain the receive queue.
   5457  */
   5458 static void
   5459 wm_rxdrain(struct wm_rxqueue *rxq)
   5460 {
   5461 	struct wm_softc *sc = rxq->rxq_sc;
   5462 	struct wm_rxsoft *rxs;
   5463 	int i;
   5464 
   5465 	KASSERT(mutex_owned(rxq->rxq_lock));
   5466 
   5467 	for (i = 0; i < WM_NRXDESC; i++) {
   5468 		rxs = &rxq->rxq_soft[i];
   5469 		if (rxs->rxs_mbuf != NULL) {
   5470 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5471 			m_freem(rxs->rxs_mbuf);
   5472 			rxs->rxs_mbuf = NULL;
   5473 		}
   5474 	}
   5475 }
   5476 
   5477 /*
   5478  * Setup registers for RSS.
   5479  *
   5480  * XXX not yet VMDq support
   5481  */
   5482 static void
   5483 wm_init_rss(struct wm_softc *sc)
   5484 {
   5485 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5486 	int i;
   5487 
   5488 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5489 
   5490 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5491 		unsigned int qid, reta_ent;
   5492 
   5493 		qid  = i % sc->sc_nqueues;
   5494 		switch (sc->sc_type) {
   5495 		case WM_T_82574:
   5496 			reta_ent = __SHIFTIN(qid,
   5497 			    RETA_ENT_QINDEX_MASK_82574);
   5498 			break;
   5499 		case WM_T_82575:
   5500 			reta_ent = __SHIFTIN(qid,
   5501 			    RETA_ENT_QINDEX1_MASK_82575);
   5502 			break;
   5503 		default:
   5504 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5505 			break;
   5506 		}
   5507 
   5508 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5509 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5510 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5511 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5512 	}
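
         	/*
         	 * The redirection table above is filled round-robin
         	 * (qid = i % sc_nqueues), so RSS hash values spread evenly
         	 * across the enabled queues.
         	 */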
   5513 
   5514 	rss_getkey((uint8_t *)rss_key);
   5515 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5516 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5517 
   5518 	if (sc->sc_type == WM_T_82574)
   5519 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5520 	else
   5521 		mrqc = MRQC_ENABLE_RSS_MQ;
   5522 
   5523 	/*
   5524 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5525 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5526 	 */
   5527 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5528 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5529 #if 0
   5530 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5531 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5532 #endif
   5533 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5534 
   5535 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5536 }
   5537 
   5538 /*
    5539  * Adjust the TX and RX queue numbers which the system actually uses.
    5540  *
    5541  * The numbers are affected by the parameters below:
    5542  *     - The number of hardware queues
   5543  *     - The number of MSI-X vectors (= "nvectors" argument)
   5544  *     - ncpu
   5545  */
   5546 static void
   5547 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5548 {
   5549 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5550 
   5551 	if (nvectors < 2) {
   5552 		sc->sc_nqueues = 1;
   5553 		return;
   5554 	}
   5555 
   5556 	switch (sc->sc_type) {
   5557 	case WM_T_82572:
   5558 		hw_ntxqueues = 2;
   5559 		hw_nrxqueues = 2;
   5560 		break;
   5561 	case WM_T_82574:
   5562 		hw_ntxqueues = 2;
   5563 		hw_nrxqueues = 2;
   5564 		break;
   5565 	case WM_T_82575:
   5566 		hw_ntxqueues = 4;
   5567 		hw_nrxqueues = 4;
   5568 		break;
   5569 	case WM_T_82576:
   5570 		hw_ntxqueues = 16;
   5571 		hw_nrxqueues = 16;
   5572 		break;
   5573 	case WM_T_82580:
   5574 	case WM_T_I350:
   5575 	case WM_T_I354:
   5576 		hw_ntxqueues = 8;
   5577 		hw_nrxqueues = 8;
   5578 		break;
   5579 	case WM_T_I210:
   5580 		hw_ntxqueues = 4;
   5581 		hw_nrxqueues = 4;
   5582 		break;
   5583 	case WM_T_I211:
   5584 		hw_ntxqueues = 2;
   5585 		hw_nrxqueues = 2;
   5586 		break;
   5587 		/*
    5588 		 * As the Ethernet controllers below do not support MSI-X,
    5589 		 * this driver does not use multiqueue on them.
   5590 		 *     - WM_T_80003
   5591 		 *     - WM_T_ICH8
   5592 		 *     - WM_T_ICH9
   5593 		 *     - WM_T_ICH10
   5594 		 *     - WM_T_PCH
   5595 		 *     - WM_T_PCH2
   5596 		 *     - WM_T_PCH_LPT
   5597 		 */
   5598 	default:
   5599 		hw_ntxqueues = 1;
   5600 		hw_nrxqueues = 1;
   5601 		break;
   5602 	}
   5603 
   5604 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5605 
   5606 	/*
    5607 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    5608 	 * the number of queues actually used.
   5609 	 */
   5610 	if (nvectors < hw_nqueues + 1)
   5611 		sc->sc_nqueues = nvectors - 1;
   5612 	else
   5613 		sc->sc_nqueues = hw_nqueues;
   5614 
   5615 	/*
    5616 	 * As more queues than CPUs cannot improve scaling, we limit
    5617 	 * the number of queues actually used.
   5618 	 */
   5619 	if (ncpu < sc->sc_nqueues)
   5620 		sc->sc_nqueues = ncpu;
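
         	/*
         	 * Example: an 82576 (16 HW queues) given 5 MSI-X vectors on an
         	 * 8-CPU machine ends up with uimin(16, 5 - 1, 8) = 4 queues.
         	 */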
   5621 }
   5622 
   5623 static inline bool
   5624 wm_is_using_msix(struct wm_softc *sc)
   5625 {
   5626 
   5627 	return (sc->sc_nintrs > 1);
   5628 }
   5629 
   5630 static inline bool
   5631 wm_is_using_multiqueue(struct wm_softc *sc)
   5632 {
   5633 
   5634 	return (sc->sc_nqueues > 1);
   5635 }
   5636 
   5637 static int
   5638 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5639 {
   5640 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5641 
   5642 	wmq->wmq_id = qidx;
   5643 	wmq->wmq_intr_idx = intr_idx;
   5644 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5645 	    wm_handle_queue, wmq);
   5646 	if (wmq->wmq_si != NULL)
   5647 		return 0;
   5648 
   5649 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5650 	    wmq->wmq_id);
   5651 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5652 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5653 	return ENOMEM;
   5654 }
   5655 
   5656 /*
   5657  * Both single interrupt MSI and INTx can use this function.
   5658  */
   5659 static int
   5660 wm_setup_legacy(struct wm_softc *sc)
   5661 {
   5662 	pci_chipset_tag_t pc = sc->sc_pc;
   5663 	const char *intrstr = NULL;
   5664 	char intrbuf[PCI_INTRSTR_LEN];
   5665 	int error;
   5666 
   5667 	error = wm_alloc_txrx_queues(sc);
   5668 	if (error) {
   5669 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5670 		    error);
   5671 		return ENOMEM;
   5672 	}
   5673 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5674 	    sizeof(intrbuf));
   5675 #ifdef WM_MPSAFE
   5676 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5677 #endif
   5678 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5679 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5680 	if (sc->sc_ihs[0] == NULL) {
   5681 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5682 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5683 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5684 		return ENOMEM;
   5685 	}
   5686 
   5687 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5688 	sc->sc_nintrs = 1;
   5689 
   5690 	return wm_softint_establish_queue(sc, 0, 0);
   5691 }
   5692 
   5693 static int
   5694 wm_setup_msix(struct wm_softc *sc)
   5695 {
   5696 	void *vih;
   5697 	kcpuset_t *affinity;
   5698 	int qidx, error, intr_idx, txrx_established;
   5699 	pci_chipset_tag_t pc = sc->sc_pc;
   5700 	const char *intrstr = NULL;
   5701 	char intrbuf[PCI_INTRSTR_LEN];
   5702 	char intr_xname[INTRDEVNAMEBUF];
   5703 
   5704 	if (sc->sc_nqueues < ncpu) {
   5705 		/*
   5706 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5707 		 * interrupts starts from CPU#1.
   5708 		 */
   5709 		sc->sc_affinity_offset = 1;
   5710 	} else {
   5711 		/*
    5712 		 * In this case, this device uses all CPUs, so we make the
    5713 		 * affinity cpu_index equal to the MSI-X vector number for
         		 * readability.
   5714 		 */
   5715 		sc->sc_affinity_offset = 0;
   5716 	}
   5717 
   5718 	error = wm_alloc_txrx_queues(sc);
   5719 	if (error) {
   5720 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5721 		    error);
   5722 		return ENOMEM;
   5723 	}
   5724 
   5725 	kcpuset_create(&affinity, false);
   5726 	intr_idx = 0;
   5727 
   5728 	/*
   5729 	 * TX and RX
   5730 	 */
   5731 	txrx_established = 0;
   5732 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5733 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5734 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5735 
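         		/*
         		 * With sc_affinity_offset == 1, queue 0's interrupt is
         		 * bound to CPU 1, queue 1's to CPU 2, and so on,
         		 * wrapping modulo ncpu.
         		 */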
   5736 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5737 		    sizeof(intrbuf));
   5738 #ifdef WM_MPSAFE
   5739 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5740 		    PCI_INTR_MPSAFE, true);
   5741 #endif
   5742 		memset(intr_xname, 0, sizeof(intr_xname));
   5743 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5744 		    device_xname(sc->sc_dev), qidx);
   5745 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5746 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5747 		if (vih == NULL) {
   5748 			aprint_error_dev(sc->sc_dev,
   5749 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5750 			    intrstr ? " at " : "",
   5751 			    intrstr ? intrstr : "");
   5752 
   5753 			goto fail;
   5754 		}
   5755 		kcpuset_zero(affinity);
   5756 		/* Round-robin affinity */
   5757 		kcpuset_set(affinity, affinity_to);
   5758 		error = interrupt_distribute(vih, affinity, NULL);
   5759 		if (error == 0) {
   5760 			aprint_normal_dev(sc->sc_dev,
   5761 			    "for TX and RX interrupting at %s affinity to %u\n",
   5762 			    intrstr, affinity_to);
   5763 		} else {
   5764 			aprint_normal_dev(sc->sc_dev,
   5765 			    "for TX and RX interrupting at %s\n", intrstr);
   5766 		}
   5767 		sc->sc_ihs[intr_idx] = vih;
   5768 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5769 			goto fail;
   5770 		txrx_established++;
   5771 		intr_idx++;
   5772 	}
   5773 
   5774 	/* LINK */
   5775 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5776 	    sizeof(intrbuf));
   5777 #ifdef WM_MPSAFE
   5778 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5779 #endif
   5780 	memset(intr_xname, 0, sizeof(intr_xname));
   5781 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5782 	    device_xname(sc->sc_dev));
   5783 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5784 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5785 	if (vih == NULL) {
   5786 		aprint_error_dev(sc->sc_dev,
   5787 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5788 		    intrstr ? " at " : "",
   5789 		    intrstr ? intrstr : "");
   5790 
   5791 		goto fail;
   5792 	}
   5793 	/* Keep default affinity to LINK interrupt */
   5794 	aprint_normal_dev(sc->sc_dev,
   5795 	    "for LINK interrupting at %s\n", intrstr);
   5796 	sc->sc_ihs[intr_idx] = vih;
   5797 	sc->sc_link_intr_idx = intr_idx;
   5798 
   5799 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5800 	kcpuset_destroy(affinity);
   5801 	return 0;
   5802 
   5803  fail:
   5804 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5805 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    5806 		pci_intr_disestablish(sc->sc_pc,
         		    sc->sc_ihs[wmq->wmq_intr_idx]);
   5807 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5808 	}
   5809 
   5810 	kcpuset_destroy(affinity);
   5811 	return ENOMEM;
   5812 }
   5813 
   5814 static void
   5815 wm_unset_stopping_flags(struct wm_softc *sc)
   5816 {
   5817 	int i;
   5818 
   5819 	KASSERT(WM_CORE_LOCKED(sc));
   5820 
   5821 	/* Must unset stopping flags in ascending order. */
   5822 	for (i = 0; i < sc->sc_nqueues; i++) {
   5823 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5824 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5825 
   5826 		mutex_enter(txq->txq_lock);
   5827 		txq->txq_stopping = false;
   5828 		mutex_exit(txq->txq_lock);
   5829 
   5830 		mutex_enter(rxq->rxq_lock);
   5831 		rxq->rxq_stopping = false;
   5832 		mutex_exit(rxq->rxq_lock);
   5833 	}
   5834 
   5835 	sc->sc_core_stopping = false;
   5836 }
   5837 
   5838 static void
   5839 wm_set_stopping_flags(struct wm_softc *sc)
   5840 {
   5841 	int i;
   5842 
   5843 	KASSERT(WM_CORE_LOCKED(sc));
   5844 
   5845 	sc->sc_core_stopping = true;
   5846 
   5847 	/* Must set stopping flags in ascending order. */
   5848 	for (i = 0; i < sc->sc_nqueues; i++) {
   5849 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5850 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5851 
   5852 		mutex_enter(rxq->rxq_lock);
   5853 		rxq->rxq_stopping = true;
   5854 		mutex_exit(rxq->rxq_lock);
   5855 
   5856 		mutex_enter(txq->txq_lock);
   5857 		txq->txq_stopping = true;
   5858 		mutex_exit(txq->txq_lock);
   5859 	}
   5860 }
   5861 
   5862 /*
   5863  * Write interrupt interval value to ITR or EITR
   5864  */
   5865 static void
   5866 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5867 {
   5868 
   5869 	if (!wmq->wmq_set_itr)
   5870 		return;
   5871 
   5872 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5873 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5874 
   5875 		/*
    5876 		 * 82575 doesn't have the CNT_INGR field,
    5877 		 * so overwrite the counter field in software.
   5878 		 */
   5879 		if (sc->sc_type == WM_T_82575)
   5880 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5881 		else
   5882 			eitr |= EITR_CNT_INGR;
   5883 
   5884 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5885 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5886 		/*
    5887 		 * 82574 has both ITR and EITR. Set EITR when we use
    5888 		 * the multiqueue function with MSI-X.
   5889 		 */
   5890 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5891 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5892 	} else {
   5893 		KASSERT(wmq->wmq_id == 0);
   5894 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5895 	}
   5896 
   5897 	wmq->wmq_set_itr = false;
   5898 }
   5899 
   5900 /*
   5901  * TODO
    5902  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5903  * however, it does not fit wm(4), so AIM stays disabled until we find
    5904  * an appropriate ITR calculation.
   5905  */
   5906 /*
    5907  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5908  * write to the register. This function itself does not write the
          * ITR/EITR register.
   5909  */
   5910 static void
   5911 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5912 {
   5913 #ifdef NOTYET
   5914 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5915 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5916 	uint32_t avg_size = 0;
   5917 	uint32_t new_itr;
   5918 
   5919 	if (rxq->rxq_packets)
    5920 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   5921 	if (txq->txq_packets)
   5922 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5923 
   5924 	if (avg_size == 0) {
   5925 		new_itr = 450; /* restore default value */
   5926 		goto out;
   5927 	}
   5928 
   5929 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5930 	avg_size += 24;
   5931 
   5932 	/* Don't starve jumbo frames */
   5933 	avg_size = uimin(avg_size, 3000);
   5934 
   5935 	/* Give a little boost to mid-size frames */
   5936 	if ((avg_size > 300) && (avg_size < 1200))
   5937 		new_itr = avg_size / 3;
   5938 	else
   5939 		new_itr = avg_size / 2;
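         	/*
         	 * Worked example: 1500-byte average frames give avg_size =
         	 * 1500 + 24 = 1524, so new_itr = 1524 / 2 = 762 here.
         	 */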
   5940 
   5941 out:
   5942 	/*
    5943 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5944 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5945 	 */
   5946 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5947 		new_itr *= 4;
   5948 
   5949 	if (new_itr != wmq->wmq_itr) {
   5950 		wmq->wmq_itr = new_itr;
   5951 		wmq->wmq_set_itr = true;
   5952 	} else
   5953 		wmq->wmq_set_itr = false;
   5954 
   5955 	rxq->rxq_packets = 0;
   5956 	rxq->rxq_bytes = 0;
   5957 	txq->txq_packets = 0;
   5958 	txq->txq_bytes = 0;
   5959 #endif
   5960 }
   5961 
   5962 static void
   5963 wm_init_sysctls(struct wm_softc *sc)
   5964 {
   5965 	struct sysctllog **log;
   5966 	const struct sysctlnode *rnode, *qnode, *cnode;
   5967 	int i, rv;
   5968 	const char *dvname;
   5969 
   5970 	log = &sc->sc_sysctllog;
   5971 	dvname = device_xname(sc->sc_dev);
   5972 
   5973 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5974 	    0, CTLTYPE_NODE, dvname,
   5975 	    SYSCTL_DESCR("wm information and settings"),
   5976 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5977 	if (rv != 0)
   5978 		goto err;
   5979 
   5980 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5981 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5982 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5983 	if (rv != 0)
   5984 		goto teardown;
   5985 
   5986 	for (i = 0; i < sc->sc_nqueues; i++) {
   5987 		struct wm_queue *wmq = &sc->sc_queue[i];
   5988 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5989 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5990 
   5991 		snprintf(sc->sc_queue[i].sysctlname,
   5992 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   5993 
   5994 		if (sysctl_createv(log, 0, &rnode, &qnode,
   5995 		    0, CTLTYPE_NODE,
   5996 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   5997 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   5998 			break;
   5999 
   6000 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6001 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6002 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6003 		    NULL, 0, &txq->txq_free,
   6004 		    0, CTL_CREATE, CTL_EOL) != 0)
   6005 			break;
   6006 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6007 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6008 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6009 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6010 		    0, CTL_CREATE, CTL_EOL) != 0)
   6011 			break;
   6012 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6013 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6014 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6015 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6016 		    0, CTL_CREATE, CTL_EOL) != 0)
   6017 			break;
   6018 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6019 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6020 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6021 		    NULL, 0, &txq->txq_next,
   6022 		    0, CTL_CREATE, CTL_EOL) != 0)
   6023 			break;
   6024 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6025 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6026 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6027 		    NULL, 0, &txq->txq_sfree,
   6028 		    0, CTL_CREATE, CTL_EOL) != 0)
   6029 			break;
   6030 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6031 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6032 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6033 		    NULL, 0, &txq->txq_snext,
   6034 		    0, CTL_CREATE, CTL_EOL) != 0)
   6035 			break;
   6036 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6037 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6038 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6039 		    NULL, 0, &txq->txq_sdirty,
   6040 		    0, CTL_CREATE, CTL_EOL) != 0)
   6041 			break;
   6042 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6043 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6044 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6045 		    NULL, 0, &txq->txq_flags,
   6046 		    0, CTL_CREATE, CTL_EOL) != 0)
   6047 			break;
   6048 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6049 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6050 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6051 		    NULL, 0, &txq->txq_stopping,
   6052 		    0, CTL_CREATE, CTL_EOL) != 0)
   6053 			break;
   6054 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6055 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6056 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6057 		    NULL, 0, &txq->txq_sending,
   6058 		    0, CTL_CREATE, CTL_EOL) != 0)
   6059 			break;
   6060 
   6061 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6062 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6063 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6064 		    NULL, 0, &rxq->rxq_ptr,
   6065 		    0, CTL_CREATE, CTL_EOL) != 0)
   6066 			break;
   6067 	}
   6068 
   6069 #ifdef WM_DEBUG
   6070 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6071 	    CTLTYPE_INT, "debug_flags",
   6072 	    SYSCTL_DESCR(
   6073 		    "Debug flags:\n"	\
   6074 		    "\t0x01 LINK\n"	\
   6075 		    "\t0x02 TX\n"	\
   6076 		    "\t0x04 RX\n"	\
   6077 		    "\t0x08 GMII\n"	\
   6078 		    "\t0x10 MANAGE\n"	\
   6079 		    "\t0x20 NVM\n"	\
   6080 		    "\t0x40 INIT\n"	\
   6081 		    "\t0x80 LOCK"),
   6082 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6083 	if (rv != 0)
   6084 		goto teardown;
   6085 #endif
   6086 
   6087 	return;
   6088 
   6089 teardown:
   6090 	sysctl_teardown(log);
   6091 err:
   6092 	sc->sc_sysctllog = NULL;
   6093 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6094 	    __func__, rv);
   6095 }
   6096 
   6097 /*
   6098  * wm_init:		[ifnet interface function]
   6099  *
   6100  *	Initialize the interface.
   6101  */
   6102 static int
   6103 wm_init(struct ifnet *ifp)
   6104 {
   6105 	struct wm_softc *sc = ifp->if_softc;
   6106 	int ret;
   6107 
   6108 	WM_CORE_LOCK(sc);
   6109 	ret = wm_init_locked(ifp);
   6110 	WM_CORE_UNLOCK(sc);
   6111 
   6112 	return ret;
   6113 }
   6114 
   6115 static int
   6116 wm_init_locked(struct ifnet *ifp)
   6117 {
   6118 	struct wm_softc *sc = ifp->if_softc;
   6119 	struct ethercom *ec = &sc->sc_ethercom;
   6120 	int i, j, trynum, error = 0;
   6121 	uint32_t reg, sfp_mask = 0;
   6122 
   6123 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6124 		device_xname(sc->sc_dev), __func__));
   6125 	KASSERT(WM_CORE_LOCKED(sc));
   6126 
   6127 	/*
    6128 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6129 	 * There is a small but measurable benefit to avoiding the adjustment
   6130 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6131 	 * on such platforms.  One possibility is that the DMA itself is
   6132 	 * slightly more efficient if the front of the entire packet (instead
   6133 	 * of the front of the headers) is aligned.
   6134 	 *
   6135 	 * Note we must always set align_tweak to 0 if we are using
   6136 	 * jumbo frames.
   6137 	 */
   6138 #ifdef __NO_STRICT_ALIGNMENT
   6139 	sc->sc_align_tweak = 0;
   6140 #else
   6141 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6142 		sc->sc_align_tweak = 0;
   6143 	else
   6144 		sc->sc_align_tweak = 2;
   6145 #endif /* __NO_STRICT_ALIGNMENT */
   6146 
   6147 	/* Cancel any pending I/O. */
   6148 	wm_stop_locked(ifp, false, false);
   6149 
   6150 	/* Update statistics before reset */
   6151 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6152 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6153 
   6154 	/* >= PCH_SPT hardware workaround before reset. */
   6155 	if (sc->sc_type >= WM_T_PCH_SPT)
   6156 		wm_flush_desc_rings(sc);
   6157 
   6158 	/* Reset the chip to a known state. */
   6159 	wm_reset(sc);
   6160 
   6161 	/*
    6162 	 * AMT-based hardware can now take control from firmware.
    6163 	 * Do this after reset.
   6164 	 */
   6165 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6166 		wm_get_hw_control(sc);
   6167 
   6168 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6169 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6170 		wm_legacy_irq_quirk_spt(sc);
   6171 
   6172 	/* Init hardware bits */
   6173 	wm_initialize_hardware_bits(sc);
   6174 
   6175 	/* Reset the PHY. */
   6176 	if (sc->sc_flags & WM_F_HAS_MII)
   6177 		wm_gmii_reset(sc);
   6178 
   6179 	if (sc->sc_type >= WM_T_ICH8) {
   6180 		reg = CSR_READ(sc, WMREG_GCR);
   6181 		/*
   6182 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   6183 		 * default after reset.
   6184 		 */
   6185 		if (sc->sc_type == WM_T_ICH8)
   6186 			reg |= GCR_NO_SNOOP_ALL;
   6187 		else
   6188 			reg &= ~GCR_NO_SNOOP_ALL;
   6189 		CSR_WRITE(sc, WMREG_GCR, reg);
   6190 	}
   6191 
   6192 	if ((sc->sc_type >= WM_T_ICH8)
   6193 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6194 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6195 
   6196 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6197 		reg |= CTRL_EXT_RO_DIS;
   6198 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6199 	}
   6200 
   6201 	/* Calculate (E)ITR value */
   6202 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   6203 		/*
    6204 		 * For NEWQUEUE devices' EITR (except for the 82575).
    6205 		 * The 82575's EITR should be set to the same throttling value
    6206 		 * as other old controllers' ITR because the interrupt/sec
    6207 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    6208 		 *
    6209 		 * The 82574's EITR should be set to the same value as ITR.
   6210 		 *
   6211 		 * For N interrupts/sec, set this value to:
   6212 		 * 1,000,000 / N in contrast to ITR throttling value.
   6213 		 */
   6214 		sc->sc_itr_init = 450;
   6215 	} else if (sc->sc_type >= WM_T_82543) {
   6216 		/*
    6217 		 * Set up the interrupt throttling register (units of 256ns).
   6218 		 * Note that a footnote in Intel's documentation says this
   6219 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6220 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6221 		 * that that is also true for the 1024ns units of the other
   6222 		 * interrupt-related timer registers -- so, really, we ought
   6223 		 * to divide this value by 4 when the link speed is low.
   6224 		 *
   6225 		 * XXX implement this division at link speed change!
   6226 		 */
   6227 
   6228 		/*
   6229 		 * For N interrupts/sec, set this value to:
   6230 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6231 		 * absolute and packet timer values to this value
   6232 		 * divided by 4 to get "simple timer" behavior.
   6233 		 */
   6234 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   6235 	}
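	/*
	 * Worked examples for the two formulas above (illustration only):
	 * NEWQUEUE EITR 450 gives roughly 1,000,000 / 450 ~= 2222
	 * interrupts/sec; legacy ITR 1500 gives
	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, matching
	 * the "2604 ints/sec" note above.
	 */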
   6236 
   6237 	error = wm_init_txrx_queues(sc);
   6238 	if (error)
   6239 		goto out;
   6240 
   6241 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6242 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6243 	    (sc->sc_type >= WM_T_82575))
   6244 		wm_serdes_power_up_link_82575(sc);
   6245 
   6246 	/* Clear out the VLAN table -- we don't use it (yet). */
   6247 	CSR_WRITE(sc, WMREG_VET, 0);
   6248 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6249 		trynum = 10; /* Due to hw errata */
   6250 	else
   6251 		trynum = 1;
   6252 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6253 		for (j = 0; j < trynum; j++)
   6254 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6255 
   6256 	/*
   6257 	 * Set up flow-control parameters.
   6258 	 *
   6259 	 * XXX Values could probably stand some tuning.
   6260 	 */
   6261 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6262 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6263 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   6264 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   6265 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6266 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6267 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6268 	}
   6269 
   6270 	sc->sc_fcrtl = FCRTL_DFLT;
   6271 	if (sc->sc_type < WM_T_82543) {
   6272 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6273 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6274 	} else {
   6275 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6276 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6277 	}
   6278 
   6279 	if (sc->sc_type == WM_T_80003)
   6280 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6281 	else
   6282 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6283 
   6284 	/* Writes the control register. */
   6285 	wm_set_vlan(sc);
   6286 
   6287 	if (sc->sc_flags & WM_F_HAS_MII) {
   6288 		uint16_t kmreg;
   6289 
   6290 		switch (sc->sc_type) {
   6291 		case WM_T_80003:
   6292 		case WM_T_ICH8:
   6293 		case WM_T_ICH9:
   6294 		case WM_T_ICH10:
   6295 		case WM_T_PCH:
   6296 		case WM_T_PCH2:
   6297 		case WM_T_PCH_LPT:
   6298 		case WM_T_PCH_SPT:
   6299 		case WM_T_PCH_CNP:
   6300 			/*
   6301 			 * Set the mac to wait the maximum time between each
   6302 			 * iteration and increase the max iterations when
   6303 			 * polling the phy; this fixes erroneous timeouts at
   6304 			 * 10Mbps.
   6305 			 */
   6306 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6307 			    0xFFFF);
   6308 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6309 			    &kmreg);
   6310 			kmreg |= 0x3F;
   6311 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6312 			    kmreg);
   6313 			break;
   6314 		default:
   6315 			break;
   6316 		}
   6317 
   6318 		if (sc->sc_type == WM_T_80003) {
   6319 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6320 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6321 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6322 
    6323 			/* Bypass the RX and TX FIFOs */
   6324 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6325 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6326 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6327 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6328 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6329 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6330 		}
   6331 	}
   6332 #if 0
   6333 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6334 #endif
   6335 
   6336 	/* Set up checksum offload parameters. */
   6337 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6338 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6339 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6340 		reg |= RXCSUM_IPOFL;
   6341 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6342 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6343 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6344 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6345 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6346 
   6347 	/* Set registers about MSI-X */
   6348 	if (wm_is_using_msix(sc)) {
   6349 		uint32_t ivar, qintr_idx;
   6350 		struct wm_queue *wmq;
   6351 		unsigned int qid;
   6352 
   6353 		if (sc->sc_type == WM_T_82575) {
   6354 			/* Interrupt control */
   6355 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6356 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6357 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6358 
   6359 			/* TX and RX */
   6360 			for (i = 0; i < sc->sc_nqueues; i++) {
   6361 				wmq = &sc->sc_queue[i];
   6362 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6363 				    EITR_TX_QUEUE(wmq->wmq_id)
   6364 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6365 			}
   6366 			/* Link status */
   6367 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6368 			    EITR_OTHER);
   6369 		} else if (sc->sc_type == WM_T_82574) {
   6370 			/* Interrupt control */
   6371 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6372 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6373 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6374 
   6375 			/*
   6376 			 * Workaround issue with spurious interrupts
   6377 			 * in MSI-X mode.
    6378 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6379 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   6380 			 */
   6381 			reg = CSR_READ(sc, WMREG_RFCTL);
   6382 			reg |= WMREG_RFCTL_ACKDIS;
   6383 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6384 
   6385 			ivar = 0;
   6386 			/* TX and RX */
   6387 			for (i = 0; i < sc->sc_nqueues; i++) {
   6388 				wmq = &sc->sc_queue[i];
   6389 				qid = wmq->wmq_id;
   6390 				qintr_idx = wmq->wmq_intr_idx;
   6391 
   6392 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6393 				    IVAR_TX_MASK_Q_82574(qid));
   6394 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6395 				    IVAR_RX_MASK_Q_82574(qid));
   6396 			}
   6397 			/* Link status */
   6398 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6399 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6400 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6401 		} else {
   6402 			/* Interrupt control */
   6403 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6404 			    | GPIE_EIAME | GPIE_PBA);
   6405 
   6406 			switch (sc->sc_type) {
   6407 			case WM_T_82580:
   6408 			case WM_T_I350:
   6409 			case WM_T_I354:
   6410 			case WM_T_I210:
   6411 			case WM_T_I211:
   6412 				/* TX and RX */
   6413 				for (i = 0; i < sc->sc_nqueues; i++) {
   6414 					wmq = &sc->sc_queue[i];
   6415 					qid = wmq->wmq_id;
   6416 					qintr_idx = wmq->wmq_intr_idx;
   6417 
   6418 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6419 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6420 					ivar |= __SHIFTIN((qintr_idx
   6421 						| IVAR_VALID),
   6422 					    IVAR_TX_MASK_Q(qid));
   6423 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6424 					ivar |= __SHIFTIN((qintr_idx
   6425 						| IVAR_VALID),
   6426 					    IVAR_RX_MASK_Q(qid));
   6427 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6428 				}
   6429 				break;
   6430 			case WM_T_82576:
   6431 				/* TX and RX */
   6432 				for (i = 0; i < sc->sc_nqueues; i++) {
   6433 					wmq = &sc->sc_queue[i];
   6434 					qid = wmq->wmq_id;
   6435 					qintr_idx = wmq->wmq_intr_idx;
   6436 
   6437 					ivar = CSR_READ(sc,
   6438 					    WMREG_IVAR_Q_82576(qid));
   6439 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6440 					ivar |= __SHIFTIN((qintr_idx
   6441 						| IVAR_VALID),
   6442 					    IVAR_TX_MASK_Q_82576(qid));
   6443 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6444 					ivar |= __SHIFTIN((qintr_idx
   6445 						| IVAR_VALID),
   6446 					    IVAR_RX_MASK_Q_82576(qid));
   6447 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6448 					    ivar);
   6449 				}
   6450 				break;
   6451 			default:
   6452 				break;
   6453 			}
   6454 
   6455 			/* Link status */
   6456 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6457 			    IVAR_MISC_OTHER);
   6458 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6459 		}
   6460 
   6461 		if (wm_is_using_multiqueue(sc)) {
   6462 			wm_init_rss(sc);
   6463 
   6464 			/*
    6465 			 * NOTE: Receive Full-Packet Checksum Offload
    6466 			 * is mutually exclusive with Multiqueue. However,
    6467 			 * this is not the same as TCP/IP checksums, which
    6468 			 * still work.
    6469 			 */
   6470 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6471 			reg |= RXCSUM_PCSD;
   6472 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6473 		}
   6474 	}
   6475 
   6476 	/* Set up the interrupt registers. */
   6477 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6478 
   6479 	/* Enable SFP module insertion interrupt if it's required */
   6480 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6481 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6482 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6483 		sfp_mask = ICR_GPI(0);
   6484 	}
   6485 
   6486 	if (wm_is_using_msix(sc)) {
   6487 		uint32_t mask;
   6488 		struct wm_queue *wmq;
   6489 
   6490 		switch (sc->sc_type) {
   6491 		case WM_T_82574:
   6492 			mask = 0;
   6493 			for (i = 0; i < sc->sc_nqueues; i++) {
   6494 				wmq = &sc->sc_queue[i];
   6495 				mask |= ICR_TXQ(wmq->wmq_id);
   6496 				mask |= ICR_RXQ(wmq->wmq_id);
   6497 			}
   6498 			mask |= ICR_OTHER;
   6499 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6500 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6501 			break;
   6502 		default:
   6503 			if (sc->sc_type == WM_T_82575) {
   6504 				mask = 0;
   6505 				for (i = 0; i < sc->sc_nqueues; i++) {
   6506 					wmq = &sc->sc_queue[i];
   6507 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6508 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6509 				}
   6510 				mask |= EITR_OTHER;
   6511 			} else {
   6512 				mask = 0;
   6513 				for (i = 0; i < sc->sc_nqueues; i++) {
   6514 					wmq = &sc->sc_queue[i];
   6515 					mask |= 1 << wmq->wmq_intr_idx;
   6516 				}
   6517 				mask |= 1 << sc->sc_link_intr_idx;
   6518 			}
   6519 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6520 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6521 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6522 
   6523 			/* For other interrupts */
   6524 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6525 			break;
   6526 		}
   6527 	} else {
   6528 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6529 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6530 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6531 	}
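	/*
	 * Illustration (not executed): on an 82574 with two queues, the
	 * loop above builds
	 *	mask = ICR_TXQ(0) | ICR_RXQ(0) | ICR_TXQ(1) | ICR_RXQ(1)
	 *	    | ICR_OTHER;
	 * which is written to EIAC_82574 and, together with ICR_LSC,
	 * to IMS.
	 */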
   6532 
   6533 	/* Set up the inter-packet gap. */
   6534 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6535 
   6536 	if (sc->sc_type >= WM_T_82543) {
   6537 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6538 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6539 			wm_itrs_writereg(sc, wmq);
   6540 		}
   6541 		/*
    6542 		 * Link interrupts occur much less frequently than TX and RX
    6543 		 * interrupts, so we don't tune the
    6544 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6545 		 * if_igb does.
   6546 		 */
   6547 	}
   6548 
   6549 	/* Set the VLAN ethernetype. */
   6550 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6551 
   6552 	/*
   6553 	 * Set up the transmit control register; we start out with
    6554 	 * a collision distance suitable for FDX, but update it when
   6555 	 * we resolve the media type.
   6556 	 */
   6557 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6558 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6559 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6560 	if (sc->sc_type >= WM_T_82571)
   6561 		sc->sc_tctl |= TCTL_MULR;
   6562 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6563 
   6564 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6565 		/* Write TDT after TCTL.EN is set. See the document. */
   6566 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6567 	}
   6568 
   6569 	if (sc->sc_type == WM_T_80003) {
   6570 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6571 		reg &= ~TCTL_EXT_GCEX_MASK;
   6572 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6573 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6574 	}
   6575 
   6576 	/* Set the media. */
   6577 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6578 		goto out;
   6579 
   6580 	/* Configure for OS presence */
   6581 	wm_init_manageability(sc);
   6582 
   6583 	/*
   6584 	 * Set up the receive control register; we actually program the
   6585 	 * register when we set the receive filter. Use multicast address
   6586 	 * offset type 0.
   6587 	 *
   6588 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6589 	 * don't enable that feature.
   6590 	 */
   6591 	sc->sc_mchash_type = 0;
   6592 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6593 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6594 
    6595 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6596 	if (sc->sc_type == WM_T_82574)
   6597 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6598 
   6599 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6600 		sc->sc_rctl |= RCTL_SECRC;
   6601 
   6602 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6603 	    && (ifp->if_mtu > ETHERMTU)) {
   6604 		sc->sc_rctl |= RCTL_LPE;
   6605 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6606 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6607 	}
   6608 
   6609 	if (MCLBYTES == 2048)
   6610 		sc->sc_rctl |= RCTL_2k;
   6611 	else {
   6612 		if (sc->sc_type >= WM_T_82543) {
   6613 			switch (MCLBYTES) {
   6614 			case 4096:
   6615 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6616 				break;
   6617 			case 8192:
   6618 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6619 				break;
   6620 			case 16384:
   6621 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6622 				break;
   6623 			default:
   6624 				panic("wm_init: MCLBYTES %d unsupported",
   6625 				    MCLBYTES);
   6626 				break;
   6627 			}
   6628 		} else
   6629 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6630 	}
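	/*
	 * Example (illustration only): with the typical MCLBYTES of 2048
	 * the plain RCTL_2k encoding is used; a kernel built with
	 * MCLBYTES = 4096 would instead set RCTL_BSEX | RCTL_BSEX_4k
	 * above.
	 */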
   6631 
   6632 	/* Enable ECC */
   6633 	switch (sc->sc_type) {
   6634 	case WM_T_82571:
   6635 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6636 		reg |= PBA_ECC_CORR_EN;
   6637 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6638 		break;
   6639 	case WM_T_PCH_LPT:
   6640 	case WM_T_PCH_SPT:
   6641 	case WM_T_PCH_CNP:
   6642 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6643 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6644 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6645 
   6646 		sc->sc_ctrl |= CTRL_MEHE;
   6647 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6648 		break;
   6649 	default:
   6650 		break;
   6651 	}
   6652 
   6653 	/*
   6654 	 * Set the receive filter.
   6655 	 *
   6656 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6657 	 * the setting of RCTL.EN in wm_set_filter()
   6658 	 */
   6659 	wm_set_filter(sc);
   6660 
    6661 	/* On 82575 and later, set RDT only if RX is enabled */
   6662 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6663 		int qidx;
   6664 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6665 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6666 			for (i = 0; i < WM_NRXDESC; i++) {
   6667 				mutex_enter(rxq->rxq_lock);
   6668 				wm_init_rxdesc(rxq, i);
   6669 				mutex_exit(rxq->rxq_lock);
   6670 
   6671 			}
   6672 		}
   6673 	}
   6674 
   6675 	wm_unset_stopping_flags(sc);
   6676 
   6677 	/* Start the one second link check clock. */
   6678 	callout_schedule(&sc->sc_tick_ch, hz);
   6679 
   6680 	/* ...all done! */
   6681 	ifp->if_flags |= IFF_RUNNING;
   6682 
   6683  out:
   6684 	/* Save last flags for the callback */
   6685 	sc->sc_if_flags = ifp->if_flags;
   6686 	sc->sc_ec_capenable = ec->ec_capenable;
   6687 	if (error)
   6688 		log(LOG_ERR, "%s: interface not running\n",
   6689 		    device_xname(sc->sc_dev));
   6690 	return error;
   6691 }
   6692 
   6693 /*
   6694  * wm_stop:		[ifnet interface function]
   6695  *
   6696  *	Stop transmission on the interface.
   6697  */
   6698 static void
   6699 wm_stop(struct ifnet *ifp, int disable)
   6700 {
   6701 	struct wm_softc *sc = ifp->if_softc;
   6702 
   6703 	ASSERT_SLEEPABLE();
   6704 
   6705 	WM_CORE_LOCK(sc);
   6706 	wm_stop_locked(ifp, disable ? true : false, true);
   6707 	WM_CORE_UNLOCK(sc);
   6708 
   6709 	/*
    6710 	 * After wm_set_stopping_flags(), it is guaranteed that
    6711 	 * wm_handle_queue_work() does not call workqueue_enqueue().
    6712 	 * However, workqueue_wait() cannot be called in
    6713 	 * wm_stop_locked() because it can sleep,
    6714 	 * so call workqueue_wait() here.
   6715 	 */
   6716 	for (int i = 0; i < sc->sc_nqueues; i++)
   6717 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6718 }
   6719 
   6720 static void
   6721 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6722 {
   6723 	struct wm_softc *sc = ifp->if_softc;
   6724 	struct wm_txsoft *txs;
   6725 	int i, qidx;
   6726 
   6727 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6728 		device_xname(sc->sc_dev), __func__));
   6729 	KASSERT(WM_CORE_LOCKED(sc));
   6730 
   6731 	wm_set_stopping_flags(sc);
   6732 
   6733 	if (sc->sc_flags & WM_F_HAS_MII) {
   6734 		/* Down the MII. */
   6735 		mii_down(&sc->sc_mii);
   6736 	} else {
   6737 #if 0
   6738 		/* Should we clear PHY's status properly? */
   6739 		wm_reset(sc);
   6740 #endif
   6741 	}
   6742 
   6743 	/* Stop the transmit and receive processes. */
   6744 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6745 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6746 	sc->sc_rctl &= ~RCTL_EN;
   6747 
   6748 	/*
   6749 	 * Clear the interrupt mask to ensure the device cannot assert its
   6750 	 * interrupt line.
   6751 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6752 	 * service any currently pending or shared interrupt.
   6753 	 */
   6754 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6755 	sc->sc_icr = 0;
   6756 	if (wm_is_using_msix(sc)) {
   6757 		if (sc->sc_type != WM_T_82574) {
   6758 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6759 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6760 		} else
   6761 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6762 	}
   6763 
   6764 	/*
   6765 	 * Stop callouts after interrupts are disabled; if we have
   6766 	 * to wait for them, we will be releasing the CORE_LOCK
   6767 	 * briefly, which will unblock interrupts on the current CPU.
   6768 	 */
   6769 
   6770 	/* Stop the one second clock. */
   6771 	if (wait)
   6772 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6773 	else
   6774 		callout_stop(&sc->sc_tick_ch);
   6775 
   6776 	/* Stop the 82547 Tx FIFO stall check timer. */
   6777 	if (sc->sc_type == WM_T_82547) {
   6778 		if (wait)
   6779 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6780 		else
   6781 			callout_stop(&sc->sc_txfifo_ch);
   6782 	}
   6783 
   6784 	/* Release any queued transmit buffers. */
   6785 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6786 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6787 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6788 		struct mbuf *m;
   6789 
   6790 		mutex_enter(txq->txq_lock);
   6791 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6792 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6793 			txs = &txq->txq_soft[i];
   6794 			if (txs->txs_mbuf != NULL) {
    6795 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6796 				m_freem(txs->txs_mbuf);
   6797 				txs->txs_mbuf = NULL;
   6798 			}
   6799 		}
   6800 		/* Drain txq_interq */
   6801 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6802 			m_freem(m);
   6803 		mutex_exit(txq->txq_lock);
   6804 	}
   6805 
   6806 	/* Mark the interface as down and cancel the watchdog timer. */
   6807 	ifp->if_flags &= ~IFF_RUNNING;
   6808 
   6809 	if (disable) {
   6810 		for (i = 0; i < sc->sc_nqueues; i++) {
   6811 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6812 			mutex_enter(rxq->rxq_lock);
   6813 			wm_rxdrain(rxq);
   6814 			mutex_exit(rxq->rxq_lock);
   6815 		}
   6816 	}
   6817 
   6818 #if 0 /* notyet */
   6819 	if (sc->sc_type >= WM_T_82544)
   6820 		CSR_WRITE(sc, WMREG_WUC, 0);
   6821 #endif
   6822 }
   6823 
   6824 static void
   6825 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6826 {
   6827 	struct mbuf *m;
   6828 	int i;
   6829 
   6830 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6831 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6832 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6833 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6834 		    m->m_data, m->m_len, m->m_flags);
   6835 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6836 	    i, i == 1 ? "" : "s");
   6837 }
   6838 
   6839 /*
   6840  * wm_82547_txfifo_stall:
   6841  *
   6842  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6843  *	reset the FIFO pointers, and restart packet transmission.
   6844  */
   6845 static void
   6846 wm_82547_txfifo_stall(void *arg)
   6847 {
   6848 	struct wm_softc *sc = arg;
   6849 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6850 
   6851 	mutex_enter(txq->txq_lock);
   6852 
   6853 	if (txq->txq_stopping)
   6854 		goto out;
   6855 
   6856 	if (txq->txq_fifo_stall) {
   6857 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6858 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6859 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6860 			/*
   6861 			 * Packets have drained.  Stop transmitter, reset
   6862 			 * FIFO pointers, restart transmitter, and kick
   6863 			 * the packet queue.
   6864 			 */
   6865 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6866 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6867 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6868 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6869 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6870 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6871 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6872 			CSR_WRITE_FLUSH(sc);
   6873 
   6874 			txq->txq_fifo_head = 0;
   6875 			txq->txq_fifo_stall = 0;
   6876 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6877 		} else {
   6878 			/*
   6879 			 * Still waiting for packets to drain; try again in
   6880 			 * another tick.
   6881 			 */
   6882 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6883 		}
   6884 	}
   6885 
   6886 out:
   6887 	mutex_exit(txq->txq_lock);
   6888 }
   6889 
   6890 /*
   6891  * wm_82547_txfifo_bugchk:
   6892  *
   6893  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6894  *	prevent enqueueing a packet that would wrap around the end
    6895  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6896  *
   6897  *	We do this by checking the amount of space before the end
   6898  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6899  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6900  *	the internal FIFO pointers to the beginning, and restart
   6901  *	transmission on the interface.
   6902  */
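/*
 * Worked example (illustrative numbers only): with a 12 KB FIFO
 * (txq_fifo_size = 12288) and txq_fifo_head = 11264, "space" below is
 * 1024 bytes, so a packet whose header-padded length reaches
 * WM_82547_PAD_LEN + 1024 = 2016 bytes takes the stall path; anything
 * shorter is sent and txq_fifo_head advances modulo the FIFO size.
 */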
   6903 #define	WM_FIFO_HDR		0x10
   6904 #define	WM_82547_PAD_LEN	0x3e0
   6905 static int
   6906 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6907 {
   6908 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6909 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6910 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6911 
   6912 	/* Just return if already stalled. */
   6913 	if (txq->txq_fifo_stall)
   6914 		return 1;
   6915 
   6916 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6917 		/* Stall only occurs in half-duplex mode. */
   6918 		goto send_packet;
   6919 	}
   6920 
   6921 	if (len >= WM_82547_PAD_LEN + space) {
   6922 		txq->txq_fifo_stall = 1;
   6923 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6924 		return 1;
   6925 	}
   6926 
   6927  send_packet:
   6928 	txq->txq_fifo_head += len;
   6929 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6930 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6931 
   6932 	return 0;
   6933 }
   6934 
   6935 static int
   6936 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6937 {
   6938 	int error;
   6939 
   6940 	/*
   6941 	 * Allocate the control data structures, and create and load the
   6942 	 * DMA map for it.
   6943 	 *
   6944 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6945 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6946 	 * both sets within the same 4G segment.
   6947 	 */
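	/*
	 * The 4G constraint is enforced below by passing 0x100000000ULL
	 * as the boundary argument to bus_dmamem_alloc(), so the
	 * allocated segment never crosses a 4 GB boundary.
	 */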
   6948 	if (sc->sc_type < WM_T_82544)
   6949 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6950 	else
   6951 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6952 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6953 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6954 	else
   6955 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6956 
   6957 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6958 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6959 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6960 		aprint_error_dev(sc->sc_dev,
   6961 		    "unable to allocate TX control data, error = %d\n",
   6962 		    error);
   6963 		goto fail_0;
   6964 	}
   6965 
   6966 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6967 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6968 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6969 		aprint_error_dev(sc->sc_dev,
   6970 		    "unable to map TX control data, error = %d\n", error);
   6971 		goto fail_1;
   6972 	}
   6973 
   6974 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6975 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6976 		aprint_error_dev(sc->sc_dev,
   6977 		    "unable to create TX control data DMA map, error = %d\n",
   6978 		    error);
   6979 		goto fail_2;
   6980 	}
   6981 
   6982 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6983 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6984 		aprint_error_dev(sc->sc_dev,
   6985 		    "unable to load TX control data DMA map, error = %d\n",
   6986 		    error);
   6987 		goto fail_3;
   6988 	}
   6989 
   6990 	return 0;
   6991 
   6992  fail_3:
   6993 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6994  fail_2:
   6995 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6996 	    WM_TXDESCS_SIZE(txq));
   6997  fail_1:
   6998 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6999  fail_0:
   7000 	return error;
   7001 }
   7002 
   7003 static void
   7004 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7005 {
   7006 
   7007 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7008 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7009 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7010 	    WM_TXDESCS_SIZE(txq));
   7011 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7012 }
   7013 
   7014 static int
   7015 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7016 {
   7017 	int error;
   7018 	size_t rxq_descs_size;
   7019 
   7020 	/*
   7021 	 * Allocate the control data structures, and create and load the
   7022 	 * DMA map for it.
   7023 	 *
   7024 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7025 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7026 	 * both sets within the same 4G segment.
   7027 	 */
   7028 	rxq->rxq_ndesc = WM_NRXDESC;
   7029 	if (sc->sc_type == WM_T_82574)
   7030 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7031 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7032 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7033 	else
   7034 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7035 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7036 
   7037 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7038 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7039 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7040 		aprint_error_dev(sc->sc_dev,
   7041 		    "unable to allocate RX control data, error = %d\n",
   7042 		    error);
   7043 		goto fail_0;
   7044 	}
   7045 
   7046 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7047 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7048 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7049 		aprint_error_dev(sc->sc_dev,
   7050 		    "unable to map RX control data, error = %d\n", error);
   7051 		goto fail_1;
   7052 	}
   7053 
   7054 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7055 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7056 		aprint_error_dev(sc->sc_dev,
   7057 		    "unable to create RX control data DMA map, error = %d\n",
   7058 		    error);
   7059 		goto fail_2;
   7060 	}
   7061 
   7062 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7063 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7064 		aprint_error_dev(sc->sc_dev,
   7065 		    "unable to load RX control data DMA map, error = %d\n",
   7066 		    error);
   7067 		goto fail_3;
   7068 	}
   7069 
   7070 	return 0;
   7071 
   7072  fail_3:
   7073 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7074  fail_2:
   7075 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7076 	    rxq_descs_size);
   7077  fail_1:
   7078 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7079  fail_0:
   7080 	return error;
   7081 }
   7082 
   7083 static void
   7084 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7085 {
   7086 
   7087 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7088 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7089 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7090 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7091 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7092 }
   7093 
   7094 
   7095 static int
   7096 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7097 {
   7098 	int i, error;
   7099 
   7100 	/* Create the transmit buffer DMA maps. */
   7101 	WM_TXQUEUELEN(txq) =
   7102 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7103 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7104 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7105 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7106 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7107 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7108 			aprint_error_dev(sc->sc_dev,
   7109 			    "unable to create Tx DMA map %d, error = %d\n",
   7110 			    i, error);
   7111 			goto fail;
   7112 		}
   7113 	}
   7114 
   7115 	return 0;
   7116 
   7117  fail:
   7118 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7119 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7120 			bus_dmamap_destroy(sc->sc_dmat,
   7121 			    txq->txq_soft[i].txs_dmamap);
   7122 	}
   7123 	return error;
   7124 }
   7125 
   7126 static void
   7127 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7128 {
   7129 	int i;
   7130 
   7131 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7132 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7133 			bus_dmamap_destroy(sc->sc_dmat,
   7134 			    txq->txq_soft[i].txs_dmamap);
   7135 	}
   7136 }
   7137 
   7138 static int
   7139 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7140 {
   7141 	int i, error;
   7142 
   7143 	/* Create the receive buffer DMA maps. */
   7144 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7145 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7146 			    MCLBYTES, 0, 0,
   7147 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7148 			aprint_error_dev(sc->sc_dev,
   7149 			    "unable to create Rx DMA map %d error = %d\n",
   7150 			    i, error);
   7151 			goto fail;
   7152 		}
   7153 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7154 	}
   7155 
   7156 	return 0;
   7157 
   7158  fail:
   7159 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7160 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7161 			bus_dmamap_destroy(sc->sc_dmat,
   7162 			    rxq->rxq_soft[i].rxs_dmamap);
   7163 	}
   7164 	return error;
   7165 }
   7166 
   7167 static void
   7168 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7169 {
   7170 	int i;
   7171 
   7172 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7173 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7174 			bus_dmamap_destroy(sc->sc_dmat,
   7175 			    rxq->rxq_soft[i].rxs_dmamap);
   7176 	}
   7177 }
   7178 
   7179 /*
    7180  * wm_alloc_txrx_queues:
    7181  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   7182  */
   7183 static int
   7184 wm_alloc_txrx_queues(struct wm_softc *sc)
   7185 {
   7186 	int i, error, tx_done, rx_done;
   7187 
   7188 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7189 	    KM_SLEEP);
   7190 	if (sc->sc_queue == NULL) {
    7191 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7192 		error = ENOMEM;
   7193 		goto fail_0;
   7194 	}
   7195 
   7196 	/* For transmission */
   7197 	error = 0;
   7198 	tx_done = 0;
   7199 	for (i = 0; i < sc->sc_nqueues; i++) {
   7200 #ifdef WM_EVENT_COUNTERS
   7201 		int j;
   7202 		const char *xname;
   7203 #endif
   7204 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7205 		txq->txq_sc = sc;
   7206 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7207 
   7208 		error = wm_alloc_tx_descs(sc, txq);
   7209 		if (error)
   7210 			break;
   7211 		error = wm_alloc_tx_buffer(sc, txq);
   7212 		if (error) {
   7213 			wm_free_tx_descs(sc, txq);
   7214 			break;
   7215 		}
   7216 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7217 		if (txq->txq_interq == NULL) {
   7218 			wm_free_tx_descs(sc, txq);
   7219 			wm_free_tx_buffer(sc, txq);
   7220 			error = ENOMEM;
   7221 			break;
   7222 		}
   7223 
   7224 #ifdef WM_EVENT_COUNTERS
   7225 		xname = device_xname(sc->sc_dev);
   7226 
   7227 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7228 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7229 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7230 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7231 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7232 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7233 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7234 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7235 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7236 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7237 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7238 
   7239 		for (j = 0; j < WM_NTXSEGS; j++) {
   7240 			snprintf(txq->txq_txseg_evcnt_names[j],
   7241 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   7242 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   7243 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   7244 		}
   7245 
   7246 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7247 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7248 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7249 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7250 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7251 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7252 #endif /* WM_EVENT_COUNTERS */
   7253 
   7254 		tx_done++;
   7255 	}
   7256 	if (error)
   7257 		goto fail_1;
   7258 
   7259 	/* For receive */
   7260 	error = 0;
   7261 	rx_done = 0;
   7262 	for (i = 0; i < sc->sc_nqueues; i++) {
   7263 #ifdef WM_EVENT_COUNTERS
   7264 		const char *xname;
   7265 #endif
   7266 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7267 		rxq->rxq_sc = sc;
   7268 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7269 
   7270 		error = wm_alloc_rx_descs(sc, rxq);
   7271 		if (error)
   7272 			break;
   7273 
   7274 		error = wm_alloc_rx_buffer(sc, rxq);
   7275 		if (error) {
   7276 			wm_free_rx_descs(sc, rxq);
   7277 			break;
   7278 		}
   7279 
   7280 #ifdef WM_EVENT_COUNTERS
   7281 		xname = device_xname(sc->sc_dev);
   7282 
   7283 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7284 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7285 
   7286 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7287 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7288 #endif /* WM_EVENT_COUNTERS */
   7289 
   7290 		rx_done++;
   7291 	}
   7292 	if (error)
   7293 		goto fail_2;
   7294 
   7295 	return 0;
   7296 
   7297  fail_2:
   7298 	for (i = 0; i < rx_done; i++) {
   7299 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7300 		wm_free_rx_buffer(sc, rxq);
   7301 		wm_free_rx_descs(sc, rxq);
   7302 		if (rxq->rxq_lock)
   7303 			mutex_obj_free(rxq->rxq_lock);
   7304 	}
   7305  fail_1:
   7306 	for (i = 0; i < tx_done; i++) {
   7307 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7308 		pcq_destroy(txq->txq_interq);
   7309 		wm_free_tx_buffer(sc, txq);
   7310 		wm_free_tx_descs(sc, txq);
   7311 		if (txq->txq_lock)
   7312 			mutex_obj_free(txq->txq_lock);
   7313 	}
   7314 
   7315 	kmem_free(sc->sc_queue,
   7316 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7317  fail_0:
   7318 	return error;
   7319 }
   7320 
   7321 /*
    7322  * wm_free_txrx_queues:
    7323  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7324  */
   7325 static void
   7326 wm_free_txrx_queues(struct wm_softc *sc)
   7327 {
   7328 	int i;
   7329 
   7330 	for (i = 0; i < sc->sc_nqueues; i++) {
   7331 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7332 
   7333 #ifdef WM_EVENT_COUNTERS
   7334 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7335 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7336 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7337 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7338 #endif /* WM_EVENT_COUNTERS */
   7339 
   7340 		wm_free_rx_buffer(sc, rxq);
   7341 		wm_free_rx_descs(sc, rxq);
   7342 		if (rxq->rxq_lock)
   7343 			mutex_obj_free(rxq->rxq_lock);
   7344 	}
   7345 
   7346 	for (i = 0; i < sc->sc_nqueues; i++) {
   7347 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7348 		struct mbuf *m;
   7349 #ifdef WM_EVENT_COUNTERS
   7350 		int j;
   7351 
   7352 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7353 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7354 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7355 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7356 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7357 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7358 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7359 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7360 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7361 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7362 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7363 
   7364 		for (j = 0; j < WM_NTXSEGS; j++)
   7365 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7366 
   7367 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7368 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7369 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7370 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7371 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7372 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7373 #endif /* WM_EVENT_COUNTERS */
   7374 
   7375 		/* Drain txq_interq */
   7376 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7377 			m_freem(m);
   7378 		pcq_destroy(txq->txq_interq);
   7379 
   7380 		wm_free_tx_buffer(sc, txq);
   7381 		wm_free_tx_descs(sc, txq);
   7382 		if (txq->txq_lock)
   7383 			mutex_obj_free(txq->txq_lock);
   7384 	}
   7385 
   7386 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7387 }
   7388 
   7389 static void
   7390 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7391 {
   7392 
   7393 	KASSERT(mutex_owned(txq->txq_lock));
   7394 
   7395 	/* Initialize the transmit descriptor ring. */
   7396 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7397 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7398 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7399 	txq->txq_free = WM_NTXDESC(txq);
   7400 	txq->txq_next = 0;
   7401 }
   7402 
   7403 static void
   7404 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7405     struct wm_txqueue *txq)
   7406 {
   7407 
   7408 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7409 		device_xname(sc->sc_dev), __func__));
   7410 	KASSERT(mutex_owned(txq->txq_lock));
   7411 
   7412 	if (sc->sc_type < WM_T_82543) {
   7413 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7414 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7415 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7416 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7417 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7418 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7419 	} else {
   7420 		int qid = wmq->wmq_id;
   7421 
   7422 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7423 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7424 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7425 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7426 
   7427 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7428 			/*
   7429 			 * Don't write TDT before TCTL.EN is set.
   7430 			 * See the document.
   7431 			 */
   7432 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7433 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7434 			    | TXDCTL_WTHRESH(0));
   7435 		else {
   7436 			/* XXX should update with AIM? */
   7437 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7438 			if (sc->sc_type >= WM_T_82540) {
   7439 				/* Should be the same */
   7440 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7441 			}
   7442 
   7443 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7444 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7445 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7446 		}
   7447 	}
   7448 }
   7449 
   7450 static void
   7451 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7452 {
   7453 	int i;
   7454 
   7455 	KASSERT(mutex_owned(txq->txq_lock));
   7456 
   7457 	/* Initialize the transmit job descriptors. */
   7458 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7459 		txq->txq_soft[i].txs_mbuf = NULL;
   7460 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7461 	txq->txq_snext = 0;
   7462 	txq->txq_sdirty = 0;
   7463 }
   7464 
   7465 static void
   7466 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7467     struct wm_txqueue *txq)
   7468 {
   7469 
   7470 	KASSERT(mutex_owned(txq->txq_lock));
   7471 
   7472 	/*
   7473 	 * Set up some register offsets that are different between
   7474 	 * the i82542 and the i82543 and later chips.
   7475 	 */
   7476 	if (sc->sc_type < WM_T_82543)
   7477 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7478 	else
   7479 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7480 
   7481 	wm_init_tx_descs(sc, txq);
   7482 	wm_init_tx_regs(sc, wmq, txq);
   7483 	wm_init_tx_buffer(sc, txq);
   7484 
   7485 	/* Clear other than WM_TXQ_LINKDOWN_DISCARD */
   7486 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7487 
   7488 	txq->txq_sending = false;
   7489 }
   7490 
   7491 static void
   7492 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7493     struct wm_rxqueue *rxq)
   7494 {
   7495 
   7496 	KASSERT(mutex_owned(rxq->rxq_lock));
   7497 
   7498 	/*
   7499 	 * Initialize the receive descriptor and receive job
   7500 	 * descriptor rings.
   7501 	 */
   7502 	if (sc->sc_type < WM_T_82543) {
   7503 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7504 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7505 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7506 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7507 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7508 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7509 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7510 
   7511 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7512 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7513 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7514 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7515 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7516 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7517 	} else {
   7518 		int qid = wmq->wmq_id;
   7519 
   7520 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7521 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7522 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7523 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7524 
   7525 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7526 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7527 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7528 
    7529 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7530 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7531 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7532 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7533 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7534 			    | RXDCTL_WTHRESH(1));
   7535 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7536 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7537 		} else {
   7538 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7539 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7540 			/* XXX should update with AIM? */
   7541 			CSR_WRITE(sc, WMREG_RDTR,
   7542 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7543 			/* MUST be same */
   7544 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7545 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7546 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7547 		}
   7548 	}
   7549 }
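/*
 * Sketch for wm_init_rx_regs() above (assumes SRRCTL_BSIZEPKT is in
 * 1 KB units, i.e. SRRCTL_BSIZEPKT_SHIFT == 10): with MCLBYTES = 2048
 * the SRRCTL write programs a buffer size of 2048 >> 10 = 2, and the
 * MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1) check panics for any
 * cluster size that is not a multiple of 1 KB.
 */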
   7550 
   7551 static int
   7552 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7553 {
   7554 	struct wm_rxsoft *rxs;
   7555 	int error, i;
   7556 
   7557 	KASSERT(mutex_owned(rxq->rxq_lock));
   7558 
   7559 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7560 		rxs = &rxq->rxq_soft[i];
   7561 		if (rxs->rxs_mbuf == NULL) {
   7562 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7563 				log(LOG_ERR, "%s: unable to allocate or map "
   7564 				    "rx buffer %d, error = %d\n",
   7565 				    device_xname(sc->sc_dev), i, error);
   7566 				/*
   7567 				 * XXX Should attempt to run with fewer receive
   7568 				 * XXX buffers instead of just failing.
   7569 				 */
   7570 				wm_rxdrain(rxq);
   7571 				return ENOMEM;
   7572 			}
   7573 		} else {
   7574 			/*
   7575 			 * For 82575 and 82576, the RX descriptors must be
   7576 			 * initialized after the setting of RCTL.EN in
   7577 			 * wm_set_filter()
   7578 			 */
   7579 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7580 				wm_init_rxdesc(rxq, i);
   7581 		}
   7582 	}
   7583 	rxq->rxq_ptr = 0;
   7584 	rxq->rxq_discard = 0;
   7585 	WM_RXCHAIN_RESET(rxq);
   7586 
   7587 	return 0;
   7588 }
   7589 
   7590 static int
   7591 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7592     struct wm_rxqueue *rxq)
   7593 {
   7594 
   7595 	KASSERT(mutex_owned(rxq->rxq_lock));
   7596 
   7597 	/*
   7598 	 * Set up some register offsets that are different between
   7599 	 * the i82542 and the i82543 and later chips.
   7600 	 */
   7601 	if (sc->sc_type < WM_T_82543)
   7602 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7603 	else
   7604 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7605 
   7606 	wm_init_rx_regs(sc, wmq, rxq);
   7607 	return wm_init_rx_buffer(sc, rxq);
   7608 }
   7609 
   7610 /*
    7611  * wm_init_txrx_queues:
    7612  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7613  */
   7614 static int
   7615 wm_init_txrx_queues(struct wm_softc *sc)
   7616 {
   7617 	int i, error = 0;
   7618 
   7619 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7620 		device_xname(sc->sc_dev), __func__));
   7621 
   7622 	for (i = 0; i < sc->sc_nqueues; i++) {
   7623 		struct wm_queue *wmq = &sc->sc_queue[i];
   7624 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7625 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7626 
   7627 		/*
   7628 		 * TODO
    7629 		 * Currently, use a constant value instead of AIM.
    7630 		 * Furthermore, the interrupt interval of a multiqueue which
    7631 		 * uses polling mode is less than the default value.
   7632 		 * More tuning and AIM are required.
   7633 		 */
   7634 		if (wm_is_using_multiqueue(sc))
   7635 			wmq->wmq_itr = 50;
   7636 		else
   7637 			wmq->wmq_itr = sc->sc_itr_init;
   7638 		wmq->wmq_set_itr = true;
   7639 
   7640 		mutex_enter(txq->txq_lock);
   7641 		wm_init_tx_queue(sc, wmq, txq);
   7642 		mutex_exit(txq->txq_lock);
   7643 
   7644 		mutex_enter(rxq->rxq_lock);
   7645 		error = wm_init_rx_queue(sc, wmq, rxq);
   7646 		mutex_exit(rxq->rxq_lock);
   7647 		if (error)
   7648 			break;
   7649 	}
   7650 
   7651 	return error;
   7652 }
   7653 
   7654 /*
   7655  * wm_tx_offload:
   7656  *
   7657  *	Set up TCP/IP checksumming parameters for the
   7658  *	specified packet.
   7659  */
   7660 static void
   7661 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7662     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7663 {
   7664 	struct mbuf *m0 = txs->txs_mbuf;
   7665 	struct livengood_tcpip_ctxdesc *t;
   7666 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7667 	uint32_t ipcse;
   7668 	struct ether_header *eh;
   7669 	int offset, iphl;
   7670 	uint8_t fields;
   7671 
   7672 	/*
   7673 	 * XXX It would be nice if the mbuf pkthdr had offset
   7674 	 * fields for the protocol headers.
   7675 	 */
   7676 
   7677 	eh = mtod(m0, struct ether_header *);
   7678 	switch (htons(eh->ether_type)) {
   7679 	case ETHERTYPE_IP:
   7680 	case ETHERTYPE_IPV6:
   7681 		offset = ETHER_HDR_LEN;
   7682 		break;
   7683 
   7684 	case ETHERTYPE_VLAN:
   7685 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7686 		break;
   7687 
   7688 	default:
   7689 		/* Don't support this protocol or encapsulation. */
   7690 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7691 		txq->txq_last_hw_ipcs = 0;
   7692 		txq->txq_last_hw_tucs = 0;
   7693 		*fieldsp = 0;
   7694 		*cmdp = 0;
   7695 		return;
   7696 	}
   7697 
   7698 	if ((m0->m_pkthdr.csum_flags &
   7699 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7700 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7701 	} else
   7702 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7703 
   7704 	ipcse = offset + iphl - 1;
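	/*
	 * Worked example (plain IPv4/TCP, no VLAN): offset is
	 * ETHER_HDR_LEN (14) and iphl is typically 20, so the IP checksum
	 * region spans bytes 14..33 inclusive (ipcse = 33), and the IPCSO
	 * computed below lands on byte 14 + 10 = 24, the ip_sum field.
	 */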
   7705 
   7706 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7707 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7708 	seg = 0;
   7709 	fields = 0;
   7710 
   7711 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7712 		int hlen = offset + iphl;
   7713 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7714 
   7715 		if (__predict_false(m0->m_len <
   7716 				    (hlen + sizeof(struct tcphdr)))) {
   7717 			/*
   7718 			 * TCP/IP headers are not in the first mbuf; we need
   7719 			 * to do this the slow and painful way. Let's just
   7720 			 * hope this doesn't happen very often.
   7721 			 */
   7722 			struct tcphdr th;
   7723 
   7724 			WM_Q_EVCNT_INCR(txq, tsopain);
   7725 
   7726 			m_copydata(m0, hlen, sizeof(th), &th);
   7727 			if (v4) {
   7728 				struct ip ip;
   7729 
   7730 				m_copydata(m0, offset, sizeof(ip), &ip);
   7731 				ip.ip_len = 0;
   7732 				m_copyback(m0,
   7733 				    offset + offsetof(struct ip, ip_len),
   7734 				    sizeof(ip.ip_len), &ip.ip_len);
   7735 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7736 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7737 			} else {
   7738 				struct ip6_hdr ip6;
   7739 
   7740 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7741 				ip6.ip6_plen = 0;
   7742 				m_copyback(m0,
   7743 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7744 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7745 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7746 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7747 			}
   7748 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7749 			    sizeof(th.th_sum), &th.th_sum);
   7750 
   7751 			hlen += th.th_off << 2;
   7752 		} else {
   7753 			/*
   7754 			 * TCP/IP headers are in the first mbuf; we can do
   7755 			 * this the easy way.
   7756 			 */
   7757 			struct tcphdr *th;
   7758 
   7759 			if (v4) {
   7760 				struct ip *ip =
   7761 				    (void *)(mtod(m0, char *) + offset);
   7762 				th = (void *)(mtod(m0, char *) + hlen);
   7763 
   7764 				ip->ip_len = 0;
   7765 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7766 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7767 			} else {
   7768 				struct ip6_hdr *ip6 =
   7769 				    (void *)(mtod(m0, char *) + offset);
   7770 				th = (void *)(mtod(m0, char *) + hlen);
   7771 
   7772 				ip6->ip6_plen = 0;
   7773 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7774 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7775 			}
   7776 			hlen += th->th_off << 2;
   7777 		}
   7778 
   7779 		if (v4) {
   7780 			WM_Q_EVCNT_INCR(txq, tso);
   7781 			cmdlen |= WTX_TCPIP_CMD_IP;
   7782 		} else {
   7783 			WM_Q_EVCNT_INCR(txq, tso6);
   7784 			ipcse = 0;
   7785 		}
   7786 		cmd |= WTX_TCPIP_CMD_TSE;
   7787 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7788 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7789 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7790 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7791 	}
   7792 
   7793 	/*
   7794 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7795 	 * offload feature, if we load the context descriptor, we
   7796 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7797 	 */
   7798 
   7799 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7800 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7801 	    WTX_TCPIP_IPCSE(ipcse);
   7802 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7803 		WM_Q_EVCNT_INCR(txq, ipsum);
   7804 		fields |= WTX_IXSM;
   7805 	}
   7806 
   7807 	offset += iphl;
   7808 
   7809 	if (m0->m_pkthdr.csum_flags &
   7810 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7811 		WM_Q_EVCNT_INCR(txq, tusum);
   7812 		fields |= WTX_TXSM;
   7813 		tucs = WTX_TCPIP_TUCSS(offset) |
   7814 		    WTX_TCPIP_TUCSO(offset +
   7815 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7816 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7817 	} else if ((m0->m_pkthdr.csum_flags &
   7818 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7819 		WM_Q_EVCNT_INCR(txq, tusum6);
   7820 		fields |= WTX_TXSM;
   7821 		tucs = WTX_TCPIP_TUCSS(offset) |
   7822 		    WTX_TCPIP_TUCSO(offset +
   7823 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7824 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7825 	} else {
   7826 		/* Just initialize it to a valid TCP context. */
   7827 		tucs = WTX_TCPIP_TUCSS(offset) |
   7828 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7829 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7830 	}
   7831 
   7832 	*cmdp = cmd;
   7833 	*fieldsp = fields;
   7834 
   7835 	/*
    7836 	 * We don't have to write a context descriptor for every packet,
    7837 	 * except on the 82574.  For the 82574, we must write a context
    7838 	 * descriptor for every packet when we use two descriptor queues.
   7839 	 *
   7840 	 * The 82574L can only remember the *last* context used
    7841 	 * regardless of the queue it was used for.  We cannot reuse
   7842 	 * contexts on this hardware platform and must generate a new
   7843 	 * context every time.  82574L hardware spec, section 7.2.6,
   7844 	 * second note.
   7845 	 */
   7846 	if (sc->sc_nqueues < 2) {
   7847 		/*
    7848 		 * Setting up a new checksum offload context for every
    7849 		 * frame takes a lot of processing time in hardware, and
    7850 		 * it also hurts performance noticeably for small frames,
    7851 		 * so avoid it when the driver can reuse the previously
    7852 		 * configured checksum offload context.
    7853 		 * For TSO, in theory we could reuse a TSO context if the
    7854 		 * frame has the same type (IP/TCP) and the same MSS.
    7855 		 * However, checking whether a frame has the same IP/TCP
    7856 		 * structure is hard, so just ignore that and always
    7857 		 * establish a new TSO context.
   7858 		 */
   7859 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7860 		    == 0) {
   7861 			if (txq->txq_last_hw_cmd == cmd &&
   7862 			    txq->txq_last_hw_fields == fields &&
   7863 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7864 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7865 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7866 				return;
   7867 			}
   7868 		}
   7869 
   7870 		txq->txq_last_hw_cmd = cmd;
   7871 		txq->txq_last_hw_fields = fields;
   7872 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7873 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7874 	}
   7875 
   7876 	/* Fill in the context descriptor. */
   7877 	t = (struct livengood_tcpip_ctxdesc *)
   7878 	    &txq->txq_descs[txq->txq_next];
   7879 	t->tcpip_ipcs = htole32(ipcs);
   7880 	t->tcpip_tucs = htole32(tucs);
   7881 	t->tcpip_cmdlen = htole32(cmdlen);
   7882 	t->tcpip_seg = htole32(seg);
   7883 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7884 
   7885 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7886 	txs->txs_ndesc++;
   7887 }
   7888 
   7889 static inline int
   7890 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7891 {
   7892 	struct wm_softc *sc = ifp->if_softc;
   7893 	u_int cpuid = cpu_index(curcpu());
   7894 
   7895 	/*
    7896 	 * Currently, a simple distribution strategy.
    7897 	 * TODO:
    7898 	 * distribute by flowid (RSS hash value).
   7899 	 */
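         	/*
         	 * For example (hypothetical values): with ncpu = 8,
         	 * sc_affinity_offset = 2 and sc_nqueues = 4, a caller running
         	 * on CPU 5 maps to queue ((5 + 8 - 2) % 8) % 4 = 3.
         	 */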
   7900 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7901 }
   7902 
   7903 static inline bool
   7904 wm_linkdown_discard(struct wm_txqueue *txq)
   7905 {
   7906 
   7907 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7908 		return true;
   7909 
   7910 	return false;
   7911 }
   7912 
   7913 /*
   7914  * wm_start:		[ifnet interface function]
   7915  *
   7916  *	Start packet transmission on the interface.
   7917  */
   7918 static void
   7919 wm_start(struct ifnet *ifp)
   7920 {
   7921 	struct wm_softc *sc = ifp->if_softc;
   7922 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7923 
   7924 #ifdef WM_MPSAFE
   7925 	KASSERT(if_is_mpsafe(ifp));
   7926 #endif
   7927 	/*
   7928 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7929 	 */
   7930 
   7931 	mutex_enter(txq->txq_lock);
   7932 	if (!txq->txq_stopping)
   7933 		wm_start_locked(ifp);
   7934 	mutex_exit(txq->txq_lock);
   7935 }
   7936 
   7937 static void
   7938 wm_start_locked(struct ifnet *ifp)
   7939 {
   7940 	struct wm_softc *sc = ifp->if_softc;
   7941 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7942 
   7943 	wm_send_common_locked(ifp, txq, false);
   7944 }
   7945 
   7946 static int
   7947 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7948 {
   7949 	int qid;
   7950 	struct wm_softc *sc = ifp->if_softc;
   7951 	struct wm_txqueue *txq;
   7952 
   7953 	qid = wm_select_txqueue(ifp, m);
   7954 	txq = &sc->sc_queue[qid].wmq_txq;
   7955 
   7956 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7957 		m_freem(m);
   7958 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7959 		return ENOBUFS;
   7960 	}
   7961 
   7962 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7963 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7964 	if (m->m_flags & M_MCAST)
   7965 		if_statinc_ref(nsr, if_omcasts);
   7966 	IF_STAT_PUTREF(ifp);
   7967 
   7968 	if (mutex_tryenter(txq->txq_lock)) {
   7969 		if (!txq->txq_stopping)
   7970 			wm_transmit_locked(ifp, txq);
   7971 		mutex_exit(txq->txq_lock);
   7972 	}
   7973 
   7974 	return 0;
   7975 }
   7976 
   7977 static void
   7978 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7979 {
   7980 
   7981 	wm_send_common_locked(ifp, txq, true);
   7982 }
   7983 
   7984 static void
   7985 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7986     bool is_transmit)
   7987 {
   7988 	struct wm_softc *sc = ifp->if_softc;
   7989 	struct mbuf *m0;
   7990 	struct wm_txsoft *txs;
   7991 	bus_dmamap_t dmamap;
   7992 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7993 	bus_addr_t curaddr;
   7994 	bus_size_t seglen, curlen;
   7995 	uint32_t cksumcmd;
   7996 	uint8_t cksumfields;
   7997 	bool remap = true;
   7998 
   7999 	KASSERT(mutex_owned(txq->txq_lock));
   8000 
   8001 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8002 		return;
   8003 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8004 		return;
   8005 
   8006 	if (__predict_false(wm_linkdown_discard(txq))) {
   8007 		do {
   8008 			if (is_transmit)
   8009 				m0 = pcq_get(txq->txq_interq);
   8010 			else
   8011 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8012 			/*
    8013 			 * Count the packet as successfully sent even though
    8014 			 * it is discarded because the PHY link is down.
   8015 			 */
   8016 			if (m0 != NULL)
   8017 				if_statinc(ifp, if_opackets);
   8018 			m_freem(m0);
   8019 		} while (m0 != NULL);
   8020 		return;
   8021 	}
   8022 
   8023 	/* Remember the previous number of free descriptors. */
   8024 	ofree = txq->txq_free;
   8025 
   8026 	/*
   8027 	 * Loop through the send queue, setting up transmit descriptors
   8028 	 * until we drain the queue, or use up all available transmit
   8029 	 * descriptors.
   8030 	 */
   8031 	for (;;) {
   8032 		m0 = NULL;
   8033 
   8034 		/* Get a work queue entry. */
   8035 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8036 			wm_txeof(txq, UINT_MAX);
   8037 			if (txq->txq_sfree == 0) {
   8038 				DPRINTF(sc, WM_DEBUG_TX,
   8039 				    ("%s: TX: no free job descriptors\n",
   8040 					device_xname(sc->sc_dev)));
   8041 				WM_Q_EVCNT_INCR(txq, txsstall);
   8042 				break;
   8043 			}
   8044 		}
   8045 
   8046 		/* Grab a packet off the queue. */
   8047 		if (is_transmit)
   8048 			m0 = pcq_get(txq->txq_interq);
   8049 		else
   8050 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8051 		if (m0 == NULL)
   8052 			break;
   8053 
   8054 		DPRINTF(sc, WM_DEBUG_TX,
   8055 		    ("%s: TX: have packet to transmit: %p\n",
   8056 			device_xname(sc->sc_dev), m0));
   8057 
   8058 		txs = &txq->txq_soft[txq->txq_snext];
   8059 		dmamap = txs->txs_dmamap;
   8060 
   8061 		use_tso = (m0->m_pkthdr.csum_flags &
   8062 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8063 
   8064 		/*
   8065 		 * So says the Linux driver:
   8066 		 * The controller does a simple calculation to make sure
   8067 		 * there is enough room in the FIFO before initiating the
   8068 		 * DMA for each buffer. The calc is:
   8069 		 *	4 = ceil(buffer len / MSS)
   8070 		 * To make sure we don't overrun the FIFO, adjust the max
   8071 		 * buffer len if the MSS drops.
   8072 		 */
   8073 		dmamap->dm_maxsegsz =
   8074 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8075 		    ? m0->m_pkthdr.segsz << 2
   8076 		    : WTX_MAX_LEN;
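         		/*
         		 * For example, a TSO packet with a (hypothetical) MSS of
         		 * 1460 caps each DMA segment at 1460 << 2 = 5840 bytes.
         		 */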
   8077 
   8078 		/*
   8079 		 * Load the DMA map.  If this fails, the packet either
   8080 		 * didn't fit in the allotted number of segments, or we
   8081 		 * were short on resources.  For the too-many-segments
   8082 		 * case, we simply report an error and drop the packet,
   8083 		 * since we can't sanely copy a jumbo packet to a single
   8084 		 * buffer.
   8085 		 */
   8086 retry:
   8087 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8088 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8089 		if (__predict_false(error)) {
   8090 			if (error == EFBIG) {
   8091 				if (remap == true) {
   8092 					struct mbuf *m;
   8093 
   8094 					remap = false;
   8095 					m = m_defrag(m0, M_NOWAIT);
   8096 					if (m != NULL) {
   8097 						WM_Q_EVCNT_INCR(txq, defrag);
   8098 						m0 = m;
   8099 						goto retry;
   8100 					}
   8101 				}
   8102 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8103 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8104 				    "DMA segments, dropping...\n",
   8105 				    device_xname(sc->sc_dev));
   8106 				wm_dump_mbuf_chain(sc, m0);
   8107 				m_freem(m0);
   8108 				continue;
   8109 			}
   8110 			/* Short on resources, just stop for now. */
   8111 			DPRINTF(sc, WM_DEBUG_TX,
   8112 			    ("%s: TX: dmamap load failed: %d\n",
   8113 				device_xname(sc->sc_dev), error));
   8114 			break;
   8115 		}
   8116 
   8117 		segs_needed = dmamap->dm_nsegs;
   8118 		if (use_tso) {
   8119 			/* For sentinel descriptor; see below. */
   8120 			segs_needed++;
   8121 		}
   8122 
   8123 		/*
   8124 		 * Ensure we have enough descriptors free to describe
   8125 		 * the packet. Note, we always reserve one descriptor
   8126 		 * at the end of the ring due to the semantics of the
   8127 		 * TDT register, plus one more in the event we need
   8128 		 * to load offload context.
   8129 		 */
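         		/*
         		 * For example, a packet with segs_needed = 3 proceeds
         		 * only when txq_free >= 5 (3 data slots plus the 2
         		 * reserved descriptors).
         		 */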
   8130 		if (segs_needed > txq->txq_free - 2) {
   8131 			/*
   8132 			 * Not enough free descriptors to transmit this
   8133 			 * packet.  We haven't committed anything yet,
   8134 			 * so just unload the DMA map, put the packet
    8135 			 * back on the queue, and punt.  Notify the upper
   8136 			 * layer that there are no more slots left.
   8137 			 */
   8138 			DPRINTF(sc, WM_DEBUG_TX,
   8139 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8140 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8141 				segs_needed, txq->txq_free - 1));
   8142 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8143 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8144 			WM_Q_EVCNT_INCR(txq, txdstall);
   8145 			break;
   8146 		}
   8147 
   8148 		/*
   8149 		 * Check for 82547 Tx FIFO bug. We need to do this
   8150 		 * once we know we can transmit the packet, since we
   8151 		 * do some internal FIFO space accounting here.
   8152 		 */
   8153 		if (sc->sc_type == WM_T_82547 &&
   8154 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8155 			DPRINTF(sc, WM_DEBUG_TX,
   8156 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8157 				device_xname(sc->sc_dev)));
   8158 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8159 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8160 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8161 			break;
   8162 		}
   8163 
   8164 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8165 
   8166 		DPRINTF(sc, WM_DEBUG_TX,
   8167 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8168 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8169 
   8170 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8171 
   8172 		/*
   8173 		 * Store a pointer to the packet so that we can free it
   8174 		 * later.
   8175 		 *
    8176 		 * Initially, we consider the number of descriptors the
    8177 		 * packet uses to be the number of DMA segments.  This may
    8178 		 * be incremented by 1 if we do checksum offload (a
    8179 		 * descriptor is used to set the checksum context).
   8180 		 */
   8181 		txs->txs_mbuf = m0;
   8182 		txs->txs_firstdesc = txq->txq_next;
   8183 		txs->txs_ndesc = segs_needed;
   8184 
   8185 		/* Set up offload parameters for this packet. */
   8186 		if (m0->m_pkthdr.csum_flags &
   8187 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8188 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8189 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8190 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8191 		} else {
   8192 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8193 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8194 			cksumcmd = 0;
   8195 			cksumfields = 0;
   8196 		}
   8197 
   8198 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8199 
   8200 		/* Sync the DMA map. */
   8201 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8202 		    BUS_DMASYNC_PREWRITE);
   8203 
   8204 		/* Initialize the transmit descriptor. */
   8205 		for (nexttx = txq->txq_next, seg = 0;
   8206 		     seg < dmamap->dm_nsegs; seg++) {
   8207 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8208 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8209 			     seglen != 0;
   8210 			     curaddr += curlen, seglen -= curlen,
   8211 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8212 				curlen = seglen;
   8213 
   8214 				/*
   8215 				 * So says the Linux driver:
   8216 				 * Work around for premature descriptor
   8217 				 * write-backs in TSO mode.  Append a
   8218 				 * 4-byte sentinel descriptor.
   8219 				 */
   8220 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8221 				    curlen > 8)
   8222 					curlen -= 4;
   8223 
   8224 				wm_set_dma_addr(
   8225 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8226 				txq->txq_descs[nexttx].wtx_cmdlen
   8227 				    = htole32(cksumcmd | curlen);
   8228 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8229 				    = 0;
   8230 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8231 				    = cksumfields;
    8232 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8233 				lasttx = nexttx;
   8234 
   8235 				DPRINTF(sc, WM_DEBUG_TX,
   8236 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8237 					"len %#04zx\n",
   8238 					device_xname(sc->sc_dev), nexttx,
   8239 					(uint64_t)curaddr, curlen));
   8240 			}
   8241 		}
   8242 
   8243 		KASSERT(lasttx != -1);
   8244 
   8245 		/*
   8246 		 * Set up the command byte on the last descriptor of
   8247 		 * the packet. If we're in the interrupt delay window,
   8248 		 * delay the interrupt.
   8249 		 */
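         		/*
         		 * WTX_CMD_RS requests descriptor status write-back;
         		 * wm_txeof() later polls the WTX_ST_DD bit of this last
         		 * descriptor to detect that the packet has completed.
         		 */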
   8250 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8251 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8252 
   8253 		/*
   8254 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8255 		 * up the descriptor to encapsulate the packet for us.
   8256 		 *
   8257 		 * This is only valid on the last descriptor of the packet.
   8258 		 */
   8259 		if (vlan_has_tag(m0)) {
   8260 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8261 			    htole32(WTX_CMD_VLE);
   8262 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8263 			    = htole16(vlan_get_tag(m0));
   8264 		}
   8265 
   8266 		txs->txs_lastdesc = lasttx;
   8267 
   8268 		DPRINTF(sc, WM_DEBUG_TX,
   8269 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8270 			device_xname(sc->sc_dev),
   8271 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8272 
   8273 		/* Sync the descriptors we're using. */
   8274 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8275 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8276 
   8277 		/* Give the packet to the chip. */
   8278 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8279 
   8280 		DPRINTF(sc, WM_DEBUG_TX,
   8281 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8282 
   8283 		DPRINTF(sc, WM_DEBUG_TX,
   8284 		    ("%s: TX: finished transmitting packet, job %d\n",
   8285 			device_xname(sc->sc_dev), txq->txq_snext));
   8286 
   8287 		/* Advance the tx pointer. */
   8288 		txq->txq_free -= txs->txs_ndesc;
   8289 		txq->txq_next = nexttx;
   8290 
   8291 		txq->txq_sfree--;
   8292 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8293 
   8294 		/* Pass the packet to any BPF listeners. */
   8295 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8296 	}
   8297 
   8298 	if (m0 != NULL) {
   8299 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8300 		WM_Q_EVCNT_INCR(txq, descdrop);
   8301 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8302 			__func__));
   8303 		m_freem(m0);
   8304 	}
   8305 
   8306 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8307 		/* No more slots; notify upper layer. */
   8308 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8309 	}
   8310 
   8311 	if (txq->txq_free != ofree) {
   8312 		/* Set a watchdog timer in case the chip flakes out. */
   8313 		txq->txq_lastsent = time_uptime;
   8314 		txq->txq_sending = true;
   8315 	}
   8316 }
   8317 
   8318 /*
   8319  * wm_nq_tx_offload:
   8320  *
   8321  *	Set up TCP/IP checksumming parameters for the
   8322  *	specified packet, for NEWQUEUE devices
   8323  */
   8324 static void
   8325 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8326     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8327 {
   8328 	struct mbuf *m0 = txs->txs_mbuf;
   8329 	uint32_t vl_len, mssidx, cmdc;
   8330 	struct ether_header *eh;
   8331 	int offset, iphl;
   8332 
   8333 	/*
   8334 	 * XXX It would be nice if the mbuf pkthdr had offset
   8335 	 * fields for the protocol headers.
   8336 	 */
   8337 	*cmdlenp = 0;
   8338 	*fieldsp = 0;
   8339 
   8340 	eh = mtod(m0, struct ether_header *);
   8341 	switch (htons(eh->ether_type)) {
   8342 	case ETHERTYPE_IP:
   8343 	case ETHERTYPE_IPV6:
   8344 		offset = ETHER_HDR_LEN;
   8345 		break;
   8346 
   8347 	case ETHERTYPE_VLAN:
   8348 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8349 		break;
   8350 
   8351 	default:
   8352 		/* Don't support this protocol or encapsulation. */
   8353 		*do_csum = false;
   8354 		return;
   8355 	}
   8356 	*do_csum = true;
   8357 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8358 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8359 
   8360 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8361 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8362 
   8363 	if ((m0->m_pkthdr.csum_flags &
   8364 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8365 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8366 	} else {
   8367 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8368 	}
   8369 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8370 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8371 
   8372 	if (vlan_has_tag(m0)) {
   8373 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8374 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8375 		*cmdlenp |= NQTX_CMD_VLE;
   8376 	}
   8377 
   8378 	mssidx = 0;
   8379 
   8380 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8381 		int hlen = offset + iphl;
   8382 		int tcp_hlen;
   8383 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8384 
   8385 		if (__predict_false(m0->m_len <
   8386 				    (hlen + sizeof(struct tcphdr)))) {
   8387 			/*
   8388 			 * TCP/IP headers are not in the first mbuf; we need
   8389 			 * to do this the slow and painful way. Let's just
   8390 			 * hope this doesn't happen very often.
   8391 			 */
   8392 			struct tcphdr th;
   8393 
   8394 			WM_Q_EVCNT_INCR(txq, tsopain);
   8395 
   8396 			m_copydata(m0, hlen, sizeof(th), &th);
   8397 			if (v4) {
   8398 				struct ip ip;
   8399 
   8400 				m_copydata(m0, offset, sizeof(ip), &ip);
   8401 				ip.ip_len = 0;
   8402 				m_copyback(m0,
   8403 				    offset + offsetof(struct ip, ip_len),
   8404 				    sizeof(ip.ip_len), &ip.ip_len);
   8405 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8406 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8407 			} else {
   8408 				struct ip6_hdr ip6;
   8409 
   8410 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8411 				ip6.ip6_plen = 0;
   8412 				m_copyback(m0,
   8413 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8414 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8415 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8416 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8417 			}
   8418 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8419 			    sizeof(th.th_sum), &th.th_sum);
   8420 
   8421 			tcp_hlen = th.th_off << 2;
   8422 		} else {
   8423 			/*
   8424 			 * TCP/IP headers are in the first mbuf; we can do
   8425 			 * this the easy way.
   8426 			 */
   8427 			struct tcphdr *th;
   8428 
   8429 			if (v4) {
   8430 				struct ip *ip =
   8431 				    (void *)(mtod(m0, char *) + offset);
   8432 				th = (void *)(mtod(m0, char *) + hlen);
   8433 
   8434 				ip->ip_len = 0;
   8435 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8436 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8437 			} else {
   8438 				struct ip6_hdr *ip6 =
   8439 				    (void *)(mtod(m0, char *) + offset);
   8440 				th = (void *)(mtod(m0, char *) + hlen);
   8441 
   8442 				ip6->ip6_plen = 0;
   8443 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8444 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8445 			}
   8446 			tcp_hlen = th->th_off << 2;
   8447 		}
   8448 		hlen += tcp_hlen;
   8449 		*cmdlenp |= NQTX_CMD_TSE;
   8450 
   8451 		if (v4) {
   8452 			WM_Q_EVCNT_INCR(txq, tso);
   8453 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8454 		} else {
   8455 			WM_Q_EVCNT_INCR(txq, tso6);
   8456 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8457 		}
   8458 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   8459 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8460 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8461 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8462 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8463 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8464 	} else {
   8465 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8466 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8467 	}
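         	/*
         	 * Note that for TSO the PAYLEN field set above covers only
         	 * the TCP payload (headers excluded), while for non-TSO
         	 * packets it covers the whole frame.
         	 */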
   8468 
   8469 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8470 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8471 		cmdc |= NQTXC_CMD_IP4;
   8472 	}
   8473 
   8474 	if (m0->m_pkthdr.csum_flags &
   8475 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8476 		WM_Q_EVCNT_INCR(txq, tusum);
   8477 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8478 			cmdc |= NQTXC_CMD_TCP;
   8479 		else
   8480 			cmdc |= NQTXC_CMD_UDP;
   8481 
   8482 		cmdc |= NQTXC_CMD_IP4;
   8483 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8484 	}
   8485 	if (m0->m_pkthdr.csum_flags &
   8486 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8487 		WM_Q_EVCNT_INCR(txq, tusum6);
   8488 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8489 			cmdc |= NQTXC_CMD_TCP;
   8490 		else
   8491 			cmdc |= NQTXC_CMD_UDP;
   8492 
   8493 		cmdc |= NQTXC_CMD_IP6;
   8494 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8495 	}
   8496 
   8497 	/*
    8498 	 * We don't have to write a context descriptor for every packet on
    8499 	 * NEWQUEUE controllers, i.e. the 82575, 82576, 82580, I350, I354,
    8500 	 * I210 and I211.  For these controllers it is enough to write one
    8501 	 * per Tx queue.
    8502 	 * Writing a context descriptor for every packet adds overhead,
    8503 	 * but it does not cause problems.
   8504 	 */
   8505 	/* Fill in the context descriptor. */
   8506 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8507 	    htole32(vl_len);
   8508 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8509 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8510 	    htole32(cmdc);
   8511 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8512 	    htole32(mssidx);
   8513 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8514 	DPRINTF(sc, WM_DEBUG_TX,
   8515 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8516 		txq->txq_next, 0, vl_len));
   8517 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8518 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8519 	txs->txs_ndesc++;
   8520 }
   8521 
   8522 /*
   8523  * wm_nq_start:		[ifnet interface function]
   8524  *
   8525  *	Start packet transmission on the interface for NEWQUEUE devices
   8526  */
   8527 static void
   8528 wm_nq_start(struct ifnet *ifp)
   8529 {
   8530 	struct wm_softc *sc = ifp->if_softc;
   8531 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8532 
   8533 #ifdef WM_MPSAFE
   8534 	KASSERT(if_is_mpsafe(ifp));
   8535 #endif
   8536 	/*
   8537 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8538 	 */
   8539 
   8540 	mutex_enter(txq->txq_lock);
   8541 	if (!txq->txq_stopping)
   8542 		wm_nq_start_locked(ifp);
   8543 	mutex_exit(txq->txq_lock);
   8544 }
   8545 
   8546 static void
   8547 wm_nq_start_locked(struct ifnet *ifp)
   8548 {
   8549 	struct wm_softc *sc = ifp->if_softc;
   8550 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8551 
   8552 	wm_nq_send_common_locked(ifp, txq, false);
   8553 }
   8554 
   8555 static int
   8556 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8557 {
   8558 	int qid;
   8559 	struct wm_softc *sc = ifp->if_softc;
   8560 	struct wm_txqueue *txq;
   8561 
   8562 	qid = wm_select_txqueue(ifp, m);
   8563 	txq = &sc->sc_queue[qid].wmq_txq;
   8564 
   8565 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8566 		m_freem(m);
   8567 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8568 		return ENOBUFS;
   8569 	}
   8570 
   8571 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8572 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8573 	if (m->m_flags & M_MCAST)
   8574 		if_statinc_ref(nsr, if_omcasts);
   8575 	IF_STAT_PUTREF(ifp);
   8576 
   8577 	/*
    8578 	 * There are two situations in which this mutex_tryenter() can
    8579 	 * fail at run time:
    8580 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    8581 	 *     (2) contention with the deferred if_start softint
    8582 	 *         (wm_handle_queue())
    8583 	 * In case (1), the last packet enqueued to txq->txq_interq is
    8584 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    8585 	 * In case (2), the last packet enqueued is likewise dequeued by
    8586 	 * wm_deferred_start_locked(), so it does not get stuck either.
   8587 	 */
   8588 	if (mutex_tryenter(txq->txq_lock)) {
   8589 		if (!txq->txq_stopping)
   8590 			wm_nq_transmit_locked(ifp, txq);
   8591 		mutex_exit(txq->txq_lock);
   8592 	}
   8593 
   8594 	return 0;
   8595 }
   8596 
   8597 static void
   8598 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8599 {
   8600 
   8601 	wm_nq_send_common_locked(ifp, txq, true);
   8602 }
   8603 
   8604 static void
   8605 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8606     bool is_transmit)
   8607 {
   8608 	struct wm_softc *sc = ifp->if_softc;
   8609 	struct mbuf *m0;
   8610 	struct wm_txsoft *txs;
   8611 	bus_dmamap_t dmamap;
   8612 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8613 	bool do_csum, sent;
   8614 	bool remap = true;
   8615 
   8616 	KASSERT(mutex_owned(txq->txq_lock));
   8617 
   8618 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8619 		return;
   8620 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8621 		return;
   8622 
   8623 	if (__predict_false(wm_linkdown_discard(txq))) {
   8624 		do {
   8625 			if (is_transmit)
   8626 				m0 = pcq_get(txq->txq_interq);
   8627 			else
   8628 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8629 			/*
    8630 			 * Count the packet as successfully sent even though
    8631 			 * it is discarded because the PHY link is down.
   8632 			 */
   8633 			if (m0 != NULL)
   8634 				if_statinc(ifp, if_opackets);
   8635 			m_freem(m0);
   8636 		} while (m0 != NULL);
   8637 		return;
   8638 	}
   8639 
   8640 	sent = false;
   8641 
   8642 	/*
   8643 	 * Loop through the send queue, setting up transmit descriptors
   8644 	 * until we drain the queue, or use up all available transmit
   8645 	 * descriptors.
   8646 	 */
   8647 	for (;;) {
   8648 		m0 = NULL;
   8649 
   8650 		/* Get a work queue entry. */
   8651 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8652 			wm_txeof(txq, UINT_MAX);
   8653 			if (txq->txq_sfree == 0) {
   8654 				DPRINTF(sc, WM_DEBUG_TX,
   8655 				    ("%s: TX: no free job descriptors\n",
   8656 					device_xname(sc->sc_dev)));
   8657 				WM_Q_EVCNT_INCR(txq, txsstall);
   8658 				break;
   8659 			}
   8660 		}
   8661 
   8662 		/* Grab a packet off the queue. */
   8663 		if (is_transmit)
   8664 			m0 = pcq_get(txq->txq_interq);
   8665 		else
   8666 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8667 		if (m0 == NULL)
   8668 			break;
   8669 
   8670 		DPRINTF(sc, WM_DEBUG_TX,
   8671 		    ("%s: TX: have packet to transmit: %p\n",
   8672 		    device_xname(sc->sc_dev), m0));
   8673 
   8674 		txs = &txq->txq_soft[txq->txq_snext];
   8675 		dmamap = txs->txs_dmamap;
   8676 
   8677 		/*
   8678 		 * Load the DMA map.  If this fails, the packet either
   8679 		 * didn't fit in the allotted number of segments, or we
   8680 		 * were short on resources.  For the too-many-segments
   8681 		 * case, we simply report an error and drop the packet,
   8682 		 * since we can't sanely copy a jumbo packet to a single
   8683 		 * buffer.
   8684 		 */
   8685 retry:
   8686 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8687 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8688 		if (__predict_false(error)) {
   8689 			if (error == EFBIG) {
   8690 				if (remap == true) {
   8691 					struct mbuf *m;
   8692 
   8693 					remap = false;
   8694 					m = m_defrag(m0, M_NOWAIT);
   8695 					if (m != NULL) {
   8696 						WM_Q_EVCNT_INCR(txq, defrag);
   8697 						m0 = m;
   8698 						goto retry;
   8699 					}
   8700 				}
   8701 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8702 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8703 				    "DMA segments, dropping...\n",
   8704 				    device_xname(sc->sc_dev));
   8705 				wm_dump_mbuf_chain(sc, m0);
   8706 				m_freem(m0);
   8707 				continue;
   8708 			}
   8709 			/* Short on resources, just stop for now. */
   8710 			DPRINTF(sc, WM_DEBUG_TX,
   8711 			    ("%s: TX: dmamap load failed: %d\n",
   8712 				device_xname(sc->sc_dev), error));
   8713 			break;
   8714 		}
   8715 
   8716 		segs_needed = dmamap->dm_nsegs;
   8717 
   8718 		/*
   8719 		 * Ensure we have enough descriptors free to describe
   8720 		 * the packet. Note, we always reserve one descriptor
   8721 		 * at the end of the ring due to the semantics of the
   8722 		 * TDT register, plus one more in the event we need
   8723 		 * to load offload context.
   8724 		 */
   8725 		if (segs_needed > txq->txq_free - 2) {
   8726 			/*
   8727 			 * Not enough free descriptors to transmit this
   8728 			 * packet.  We haven't committed anything yet,
    8729 			 * back on the queue, and punt.  Notify the upper
   8730 			 * pack on the queue, and punt. Notify the upper
   8731 			 * layer that there are no more slots left.
   8732 			 */
   8733 			DPRINTF(sc, WM_DEBUG_TX,
   8734 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8735 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8736 				segs_needed, txq->txq_free - 1));
   8737 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8738 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8739 			WM_Q_EVCNT_INCR(txq, txdstall);
   8740 			break;
   8741 		}
   8742 
   8743 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8744 
   8745 		DPRINTF(sc, WM_DEBUG_TX,
   8746 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8747 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8748 
   8749 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8750 
   8751 		/*
   8752 		 * Store a pointer to the packet so that we can free it
   8753 		 * later.
   8754 		 *
    8755 		 * Initially, we consider the number of descriptors the
    8756 		 * packet uses to be the number of DMA segments.  This may
    8757 		 * be incremented by 1 if we do checksum offload (a
    8758 		 * descriptor is used to set the checksum context).
   8759 		 */
   8760 		txs->txs_mbuf = m0;
   8761 		txs->txs_firstdesc = txq->txq_next;
   8762 		txs->txs_ndesc = segs_needed;
   8763 
   8764 		/* Set up offload parameters for this packet. */
   8765 		uint32_t cmdlen, fields, dcmdlen;
   8766 		if (m0->m_pkthdr.csum_flags &
   8767 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8768 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8769 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8770 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8771 			    &do_csum);
   8772 		} else {
   8773 			do_csum = false;
   8774 			cmdlen = 0;
   8775 			fields = 0;
   8776 		}
   8777 
   8778 		/* Sync the DMA map. */
   8779 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8780 		    BUS_DMASYNC_PREWRITE);
   8781 
   8782 		/* Initialize the first transmit descriptor. */
   8783 		nexttx = txq->txq_next;
   8784 		if (!do_csum) {
    8785 			/* Set up a legacy descriptor. */
   8786 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8787 			    dmamap->dm_segs[0].ds_addr);
   8788 			txq->txq_descs[nexttx].wtx_cmdlen =
   8789 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8790 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8791 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8792 			if (vlan_has_tag(m0)) {
   8793 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8794 				    htole32(WTX_CMD_VLE);
   8795 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8796 				    htole16(vlan_get_tag(m0));
   8797 			} else
    8798 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8799 
   8800 			dcmdlen = 0;
   8801 		} else {
    8802 			/* Set up an advanced data descriptor. */
   8803 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8804 			    htole64(dmamap->dm_segs[0].ds_addr);
   8805 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8806 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8807 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8808 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8809 			    htole32(fields);
   8810 			DPRINTF(sc, WM_DEBUG_TX,
   8811 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8812 				device_xname(sc->sc_dev), nexttx,
   8813 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8814 			DPRINTF(sc, WM_DEBUG_TX,
   8815 			    ("\t 0x%08x%08x\n", fields,
   8816 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8817 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8818 		}
   8819 
   8820 		lasttx = nexttx;
   8821 		nexttx = WM_NEXTTX(txq, nexttx);
   8822 		/*
    8823 		 * Fill in the next descriptors.  The legacy and advanced
    8824 		 * formats are the same from here on.
   8825 		 */
   8826 		for (seg = 1; seg < dmamap->dm_nsegs;
   8827 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8828 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8829 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8830 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8831 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8832 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8833 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8834 			lasttx = nexttx;
   8835 
   8836 			DPRINTF(sc, WM_DEBUG_TX,
   8837 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8838 				device_xname(sc->sc_dev), nexttx,
   8839 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8840 				dmamap->dm_segs[seg].ds_len));
   8841 		}
   8842 
   8843 		KASSERT(lasttx != -1);
   8844 
   8845 		/*
   8846 		 * Set up the command byte on the last descriptor of
   8847 		 * the packet. If we're in the interrupt delay window,
   8848 		 * delay the interrupt.
   8849 		 */
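         		/*
         		 * The legacy and advanced descriptor formats put the
         		 * EOP and RS bits in the same positions (the KASSERT
         		 * below checks this), so setting them through the
         		 * legacy view works for both.
         		 */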
   8850 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8851 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8852 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8853 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8854 
   8855 		txs->txs_lastdesc = lasttx;
   8856 
   8857 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8858 		    device_xname(sc->sc_dev),
   8859 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8860 
   8861 		/* Sync the descriptors we're using. */
   8862 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8863 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8864 
   8865 		/* Give the packet to the chip. */
   8866 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8867 		sent = true;
   8868 
   8869 		DPRINTF(sc, WM_DEBUG_TX,
   8870 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8871 
   8872 		DPRINTF(sc, WM_DEBUG_TX,
   8873 		    ("%s: TX: finished transmitting packet, job %d\n",
   8874 			device_xname(sc->sc_dev), txq->txq_snext));
   8875 
   8876 		/* Advance the tx pointer. */
   8877 		txq->txq_free -= txs->txs_ndesc;
   8878 		txq->txq_next = nexttx;
   8879 
   8880 		txq->txq_sfree--;
   8881 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8882 
   8883 		/* Pass the packet to any BPF listeners. */
   8884 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8885 	}
   8886 
   8887 	if (m0 != NULL) {
   8888 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8889 		WM_Q_EVCNT_INCR(txq, descdrop);
   8890 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8891 			__func__));
   8892 		m_freem(m0);
   8893 	}
   8894 
   8895 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8896 		/* No more slots; notify upper layer. */
   8897 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8898 	}
   8899 
   8900 	if (sent) {
   8901 		/* Set a watchdog timer in case the chip flakes out. */
   8902 		txq->txq_lastsent = time_uptime;
   8903 		txq->txq_sending = true;
   8904 	}
   8905 }
   8906 
   8907 static void
   8908 wm_deferred_start_locked(struct wm_txqueue *txq)
   8909 {
   8910 	struct wm_softc *sc = txq->txq_sc;
   8911 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8912 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8913 	int qid = wmq->wmq_id;
   8914 
   8915 	KASSERT(mutex_owned(txq->txq_lock));
   8916 
   8917 	if (txq->txq_stopping) {
   8918 		mutex_exit(txq->txq_lock);
   8919 		return;
   8920 	}
   8921 
   8922 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8923 		/* XXX needed for ALTQ or single-CPU systems */
   8924 		if (qid == 0)
   8925 			wm_nq_start_locked(ifp);
   8926 		wm_nq_transmit_locked(ifp, txq);
   8927 	} else {
    8928 		/* XXX needed for ALTQ or single-CPU systems */
   8929 		if (qid == 0)
   8930 			wm_start_locked(ifp);
   8931 		wm_transmit_locked(ifp, txq);
   8932 	}
   8933 }
   8934 
   8935 /* Interrupt */
   8936 
   8937 /*
   8938  * wm_txeof:
   8939  *
   8940  *	Helper; handle transmit interrupts.
   8941  */
   8942 static bool
   8943 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8944 {
   8945 	struct wm_softc *sc = txq->txq_sc;
   8946 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8947 	struct wm_txsoft *txs;
   8948 	int count = 0;
   8949 	int i;
   8950 	uint8_t status;
   8951 	bool more = false;
   8952 
   8953 	KASSERT(mutex_owned(txq->txq_lock));
   8954 
   8955 	if (txq->txq_stopping)
   8956 		return false;
   8957 
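         	/*
         	 * Clear WM_TXQ_NO_SPACE first: reclaiming descriptors below
         	 * may free space, and the send paths return early while this
         	 * flag is set.
         	 */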
   8958 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8959 
   8960 	/*
   8961 	 * Go through the Tx list and free mbufs for those
   8962 	 * frames which have been transmitted.
   8963 	 */
   8964 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8965 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8966 		if (limit-- == 0) {
   8967 			more = true;
   8968 			DPRINTF(sc, WM_DEBUG_TX,
   8969 			    ("%s: TX: loop limited, job %d is not processed\n",
   8970 				device_xname(sc->sc_dev), i));
   8971 			break;
   8972 		}
   8973 
   8974 		txs = &txq->txq_soft[i];
   8975 
   8976 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8977 			device_xname(sc->sc_dev), i));
   8978 
   8979 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8980 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8981 
   8982 		status =
   8983 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8984 		if ((status & WTX_ST_DD) == 0) {
   8985 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8986 			    BUS_DMASYNC_PREREAD);
   8987 			break;
   8988 		}
   8989 
   8990 		count++;
   8991 		DPRINTF(sc, WM_DEBUG_TX,
   8992 		    ("%s: TX: job %d done: descs %d..%d\n",
   8993 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8994 		    txs->txs_lastdesc));
   8995 
   8996 		/*
   8997 		 * XXX We should probably be using the statistics
   8998 		 * XXX registers, but I don't know if they exist
   8999 		 * XXX on chips before the i82544.
   9000 		 */
   9001 
   9002 #ifdef WM_EVENT_COUNTERS
   9003 		if (status & WTX_ST_TU)
   9004 			WM_Q_EVCNT_INCR(txq, underrun);
   9005 #endif /* WM_EVENT_COUNTERS */
   9006 
   9007 		/*
    9008 		 * The documents for the 82574 and newer say the status field
    9009 		 * has neither an EC (Excessive Collision) bit nor an LC (Late
    9010 		 * Collision) bit; both are reserved.  See the "PCIe GbE
    9011 		 * Controller Open Source Software Developer's Manual" and the
    9012 		 * 82574 and newer datasheets.
    9013 		 *
    9014 		 * XXX I saw the LC bit set on an I218 even though the media was
    9015 		 * full duplex; the bit might have some other, undocumented meaning.
   9016 		 */
   9017 
   9018 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9019 		    && ((sc->sc_type < WM_T_82574)
   9020 			|| (sc->sc_type == WM_T_80003))) {
   9021 			if_statinc(ifp, if_oerrors);
   9022 			if (status & WTX_ST_LC)
   9023 				log(LOG_WARNING, "%s: late collision\n",
   9024 				    device_xname(sc->sc_dev));
   9025 			else if (status & WTX_ST_EC) {
   9026 				if_statadd(ifp, if_collisions,
   9027 				    TX_COLLISION_THRESHOLD + 1);
   9028 				log(LOG_WARNING, "%s: excessive collisions\n",
   9029 				    device_xname(sc->sc_dev));
   9030 			}
   9031 		} else
   9032 			if_statinc(ifp, if_opackets);
   9033 
   9034 		txq->txq_packets++;
   9035 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9036 
   9037 		txq->txq_free += txs->txs_ndesc;
   9038 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9039 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9040 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9041 		m_freem(txs->txs_mbuf);
   9042 		txs->txs_mbuf = NULL;
   9043 	}
   9044 
   9045 	/* Update the dirty transmit buffer pointer. */
   9046 	txq->txq_sdirty = i;
   9047 	DPRINTF(sc, WM_DEBUG_TX,
   9048 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9049 
   9050 	if (count != 0)
   9051 		rnd_add_uint32(&sc->rnd_source, count);
   9052 
   9053 	/*
   9054 	 * If there are no more pending transmissions, cancel the watchdog
   9055 	 * timer.
   9056 	 */
   9057 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9058 		txq->txq_sending = false;
   9059 
   9060 	return more;
   9061 }
   9062 
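         /*
          * The wm_rxdesc_get_* and wm_rxdesc_is_* helpers below hide the
          * three Rx descriptor layouts from the receive path: the legacy
          * format, the 82574 extended format and the NEWQUEUE advanced
          * format.
          */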
   9063 static inline uint32_t
   9064 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9065 {
   9066 	struct wm_softc *sc = rxq->rxq_sc;
   9067 
   9068 	if (sc->sc_type == WM_T_82574)
   9069 		return EXTRXC_STATUS(
   9070 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9071 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9072 		return NQRXC_STATUS(
   9073 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9074 	else
   9075 		return rxq->rxq_descs[idx].wrx_status;
   9076 }
   9077 
   9078 static inline uint32_t
   9079 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9080 {
   9081 	struct wm_softc *sc = rxq->rxq_sc;
   9082 
   9083 	if (sc->sc_type == WM_T_82574)
   9084 		return EXTRXC_ERROR(
   9085 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9086 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9087 		return NQRXC_ERROR(
   9088 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9089 	else
   9090 		return rxq->rxq_descs[idx].wrx_errors;
   9091 }
   9092 
   9093 static inline uint16_t
   9094 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9095 {
   9096 	struct wm_softc *sc = rxq->rxq_sc;
   9097 
   9098 	if (sc->sc_type == WM_T_82574)
   9099 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9100 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9101 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9102 	else
   9103 		return rxq->rxq_descs[idx].wrx_special;
   9104 }
   9105 
   9106 static inline int
   9107 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9108 {
   9109 	struct wm_softc *sc = rxq->rxq_sc;
   9110 
   9111 	if (sc->sc_type == WM_T_82574)
   9112 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9113 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9114 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9115 	else
   9116 		return rxq->rxq_descs[idx].wrx_len;
   9117 }
   9118 
   9119 #ifdef WM_DEBUG
   9120 static inline uint32_t
   9121 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9122 {
   9123 	struct wm_softc *sc = rxq->rxq_sc;
   9124 
   9125 	if (sc->sc_type == WM_T_82574)
   9126 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9127 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9128 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9129 	else
   9130 		return 0;
   9131 }
   9132 
   9133 static inline uint8_t
   9134 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9135 {
   9136 	struct wm_softc *sc = rxq->rxq_sc;
   9137 
   9138 	if (sc->sc_type == WM_T_82574)
   9139 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9140 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9141 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9142 	else
   9143 		return 0;
   9144 }
   9145 #endif /* WM_DEBUG */
   9146 
   9147 static inline bool
   9148 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9149     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9150 {
   9151 
   9152 	if (sc->sc_type == WM_T_82574)
   9153 		return (status & ext_bit) != 0;
   9154 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9155 		return (status & nq_bit) != 0;
   9156 	else
   9157 		return (status & legacy_bit) != 0;
   9158 }
   9159 
   9160 static inline bool
   9161 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9162     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9163 {
   9164 
   9165 	if (sc->sc_type == WM_T_82574)
   9166 		return (error & ext_bit) != 0;
   9167 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9168 		return (error & nq_bit) != 0;
   9169 	else
   9170 		return (error & legacy_bit) != 0;
   9171 }
   9172 
   9173 static inline bool
   9174 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9175 {
   9176 
   9177 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9178 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9179 		return true;
   9180 	else
   9181 		return false;
   9182 }
   9183 
   9184 static inline bool
   9185 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9186 {
   9187 	struct wm_softc *sc = rxq->rxq_sc;
   9188 
   9189 	/* XXX missing error bit for newqueue? */
   9190 	if (wm_rxdesc_is_set_error(sc, errors,
   9191 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9192 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9193 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9194 		NQRXC_ERROR_RXE)) {
   9195 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9196 		    EXTRXC_ERROR_SE, 0))
   9197 			log(LOG_WARNING, "%s: symbol error\n",
   9198 			    device_xname(sc->sc_dev));
   9199 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9200 		    EXTRXC_ERROR_SEQ, 0))
   9201 			log(LOG_WARNING, "%s: receive sequence error\n",
   9202 			    device_xname(sc->sc_dev));
   9203 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9204 		    EXTRXC_ERROR_CE, 0))
   9205 			log(LOG_WARNING, "%s: CRC error\n",
   9206 			    device_xname(sc->sc_dev));
   9207 		return true;
   9208 	}
   9209 
   9210 	return false;
   9211 }
   9212 
   9213 static inline bool
   9214 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9215 {
   9216 	struct wm_softc *sc = rxq->rxq_sc;
   9217 
   9218 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9219 		NQRXC_STATUS_DD)) {
   9220 		/* We have processed all of the receive descriptors. */
   9221 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9222 		return false;
   9223 	}
   9224 
   9225 	return true;
   9226 }
   9227 
   9228 static inline bool
   9229 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9230     uint16_t vlantag, struct mbuf *m)
   9231 {
   9232 
   9233 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9234 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9235 		vlan_set_tag(m, le16toh(vlantag));
   9236 	}
   9237 
   9238 	return true;
   9239 }
   9240 
   9241 static inline void
   9242 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9243     uint32_t errors, struct mbuf *m)
   9244 {
   9245 	struct wm_softc *sc = rxq->rxq_sc;
   9246 
   9247 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9248 		if (wm_rxdesc_is_set_status(sc, status,
   9249 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9250 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9251 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9252 			if (wm_rxdesc_is_set_error(sc, errors,
   9253 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9254 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9255 		}
   9256 		if (wm_rxdesc_is_set_status(sc, status,
   9257 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9258 			/*
   9259 			 * Note: we don't know if this was TCP or UDP,
   9260 			 * so we just set both bits, and expect the
   9261 			 * upper layers to deal.
   9262 			 */
   9263 			WM_Q_EVCNT_INCR(rxq, tusum);
   9264 			m->m_pkthdr.csum_flags |=
   9265 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9266 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9267 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9268 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9269 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9270 		}
   9271 	}
   9272 }
   9273 
   9274 /*
   9275  * wm_rxeof:
   9276  *
   9277  *	Helper; handle receive interrupts.
   9278  */
   9279 static bool
   9280 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9281 {
   9282 	struct wm_softc *sc = rxq->rxq_sc;
   9283 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9284 	struct wm_rxsoft *rxs;
   9285 	struct mbuf *m;
   9286 	int i, len;
   9287 	int count = 0;
   9288 	uint32_t status, errors;
   9289 	uint16_t vlantag;
   9290 	bool more = false;
   9291 
   9292 	KASSERT(mutex_owned(rxq->rxq_lock));
   9293 
   9294 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9295 		if (limit-- == 0) {
   9296 			more = true;
   9297 			DPRINTF(sc, WM_DEBUG_RX,
   9298 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9299 				device_xname(sc->sc_dev), i));
   9300 			break;
   9301 		}
   9302 
   9303 		rxs = &rxq->rxq_soft[i];
   9304 
   9305 		DPRINTF(sc, WM_DEBUG_RX,
   9306 		    ("%s: RX: checking descriptor %d\n",
   9307 			device_xname(sc->sc_dev), i));
   9308 		wm_cdrxsync(rxq, i,
   9309 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9310 
   9311 		status = wm_rxdesc_get_status(rxq, i);
   9312 		errors = wm_rxdesc_get_errors(rxq, i);
   9313 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9314 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9315 #ifdef WM_DEBUG
   9316 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9317 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9318 #endif
   9319 
   9320 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9321 			break;
   9322 		}
   9323 
   9324 		count++;
   9325 		if (__predict_false(rxq->rxq_discard)) {
   9326 			DPRINTF(sc, WM_DEBUG_RX,
   9327 			    ("%s: RX: discarding contents of descriptor %d\n",
   9328 				device_xname(sc->sc_dev), i));
   9329 			wm_init_rxdesc(rxq, i);
   9330 			if (wm_rxdesc_is_eop(rxq, status)) {
   9331 				/* Reset our state. */
   9332 				DPRINTF(sc, WM_DEBUG_RX,
   9333 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9334 					device_xname(sc->sc_dev)));
   9335 				rxq->rxq_discard = 0;
   9336 			}
   9337 			continue;
   9338 		}
   9339 
   9340 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9341 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9342 
   9343 		m = rxs->rxs_mbuf;
   9344 
   9345 		/*
   9346 		 * Add a new receive buffer to the ring, unless of
   9347 		 * course the length is zero. Treat the latter as a
   9348 		 * failed mapping.
   9349 		 */
   9350 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9351 			/*
   9352 			 * Failed, throw away what we've done so
   9353 			 * far, and discard the rest of the packet.
   9354 			 */
   9355 			if_statinc(ifp, if_ierrors);
   9356 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9357 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9358 			wm_init_rxdesc(rxq, i);
   9359 			if (!wm_rxdesc_is_eop(rxq, status))
   9360 				rxq->rxq_discard = 1;
   9361 			if (rxq->rxq_head != NULL)
   9362 				m_freem(rxq->rxq_head);
   9363 			WM_RXCHAIN_RESET(rxq);
   9364 			DPRINTF(sc, WM_DEBUG_RX,
   9365 			    ("%s: RX: Rx buffer allocation failed, "
   9366 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9367 				rxq->rxq_discard ? " (discard)" : ""));
   9368 			continue;
   9369 		}
   9370 
   9371 		m->m_len = len;
   9372 		rxq->rxq_len += len;
   9373 		DPRINTF(sc, WM_DEBUG_RX,
   9374 		    ("%s: RX: buffer at %p len %d\n",
   9375 			device_xname(sc->sc_dev), m->m_data, len));
   9376 
   9377 		/* If this is not the end of the packet, keep looking. */
   9378 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9379 			WM_RXCHAIN_LINK(rxq, m);
   9380 			DPRINTF(sc, WM_DEBUG_RX,
   9381 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9382 				device_xname(sc->sc_dev), rxq->rxq_len));
   9383 			continue;
   9384 		}
   9385 
   9386 		/*
    9387 		 * Okay, we have the entire packet now.  The chip is configured
    9388 		 * to include the FCS except on the I35[04] and I21[01] (not all
    9389 		 * chips can be configured to strip it), so we need to trim it.
    9390 		 * Those chips have an erratum: the RCTL_SECRC bit in the RCTL
    9391 		 * register is always set, so we don't trim the FCS on them.
    9392 		 * PCH2 and newer chips also don't include the FCS when jumbo
    9393 		 * frames are used, to work around an erratum.
    9394 		 * We may need to adjust the length of the previous mbuf in the
    9395 		 * chain if the current mbuf is too short.
   9396 		 */
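		/*
		 * For example, with the 4-byte FCS, if the final mbuf holds
		 * only 2 bytes, those 2 bytes plus the last 2 bytes of the
		 * previous mbuf in the chain must be discarded.
		 */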
   9397 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9398 			if (m->m_len < ETHER_CRC_LEN) {
   9399 				rxq->rxq_tail->m_len
   9400 				    -= (ETHER_CRC_LEN - m->m_len);
   9401 				m->m_len = 0;
   9402 			} else
   9403 				m->m_len -= ETHER_CRC_LEN;
   9404 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9405 		} else
   9406 			len = rxq->rxq_len;
   9407 
   9408 		WM_RXCHAIN_LINK(rxq, m);
   9409 
   9410 		*rxq->rxq_tailp = NULL;
   9411 		m = rxq->rxq_head;
   9412 
   9413 		WM_RXCHAIN_RESET(rxq);
   9414 
   9415 		DPRINTF(sc, WM_DEBUG_RX,
   9416 		    ("%s: RX: have entire packet, len -> %d\n",
   9417 			device_xname(sc->sc_dev), len));
   9418 
   9419 		/* If an error occurred, update stats and drop the packet. */
   9420 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9421 			m_freem(m);
   9422 			continue;
   9423 		}
   9424 
   9425 		/* No errors.  Receive the packet. */
   9426 		m_set_rcvif(m, ifp);
   9427 		m->m_pkthdr.len = len;
    9428 		/*
    9429 		 * TODO
    9430 		 * Should save the RSS hash and RSS type to this mbuf.
    9431 		 */
   9432 		DPRINTF(sc, WM_DEBUG_RX,
   9433 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9434 			device_xname(sc->sc_dev), rsstype, rsshash));
   9435 
   9436 		/*
   9437 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9438 		 * for us.  Associate the tag with the packet.
   9439 		 */
   9440 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9441 			continue;
   9442 
   9443 		/* Set up checksum info for this packet. */
   9444 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9445 
   9446 		rxq->rxq_packets++;
   9447 		rxq->rxq_bytes += len;
   9448 		/* Pass it on. */
   9449 		if_percpuq_enqueue(sc->sc_ipq, m);
   9450 
   9451 		if (rxq->rxq_stopping)
   9452 			break;
   9453 	}
   9454 	rxq->rxq_ptr = i;
   9455 
   9456 	if (count != 0)
   9457 		rnd_add_uint32(&sc->rnd_source, count);
   9458 
   9459 	DPRINTF(sc, WM_DEBUG_RX,
   9460 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9461 
   9462 	return more;
   9463 }
   9464 
   9465 /*
   9466  * wm_linkintr_gmii:
   9467  *
   9468  *	Helper; handle link interrupts for GMII.
   9469  */
   9470 static void
   9471 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9472 {
   9473 	device_t dev = sc->sc_dev;
   9474 	uint32_t status, reg;
   9475 	bool link;
   9476 	int rv;
   9477 
   9478 	KASSERT(WM_CORE_LOCKED(sc));
   9479 
   9480 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9481 		__func__));
   9482 
   9483 	if ((icr & ICR_LSC) == 0) {
   9484 		if (icr & ICR_RXSEQ)
   9485 			DPRINTF(sc, WM_DEBUG_LINK,
   9486 			    ("%s: LINK Receive sequence error\n",
   9487 				device_xname(dev)));
   9488 		return;
   9489 	}
   9490 
   9491 	/* Link status changed */
   9492 	status = CSR_READ(sc, WMREG_STATUS);
   9493 	link = status & STATUS_LU;
   9494 	if (link) {
   9495 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9496 			device_xname(dev),
   9497 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9498 		if (wm_phy_need_linkdown_discard(sc)) {
   9499 			DPRINTF(sc, WM_DEBUG_LINK,
   9500 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9501 				device_xname(dev)));
   9502 			wm_clear_linkdown_discard(sc);
   9503 		}
   9504 	} else {
   9505 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9506 			device_xname(dev)));
   9507 		if (wm_phy_need_linkdown_discard(sc)) {
   9508 			DPRINTF(sc, WM_DEBUG_LINK,
   9509 			    ("%s: linkintr: Set linkdown discard flag\n",
   9510 				device_xname(dev)));
   9511 			wm_set_linkdown_discard(sc);
   9512 		}
   9513 	}
   9514 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9515 		wm_gig_downshift_workaround_ich8lan(sc);
   9516 
   9517 	if ((sc->sc_type == WM_T_ICH8)
   9518 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9519 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9520 	}
   9521 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9522 		device_xname(dev)));
   9523 	mii_pollstat(&sc->sc_mii);
   9524 	if (sc->sc_type == WM_T_82543) {
   9525 		int miistatus, active;
   9526 
   9527 		/*
   9528 		 * With 82543, we need to force speed and
   9529 		 * duplex on the MAC equal to what the PHY
   9530 		 * speed and duplex configuration is.
   9531 		 */
   9532 		miistatus = sc->sc_mii.mii_media_status;
   9533 
   9534 		if (miistatus & IFM_ACTIVE) {
   9535 			active = sc->sc_mii.mii_media_active;
   9536 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9537 			switch (IFM_SUBTYPE(active)) {
   9538 			case IFM_10_T:
   9539 				sc->sc_ctrl |= CTRL_SPEED_10;
   9540 				break;
   9541 			case IFM_100_TX:
   9542 				sc->sc_ctrl |= CTRL_SPEED_100;
   9543 				break;
   9544 			case IFM_1000_T:
   9545 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9546 				break;
   9547 			default:
   9548 				/*
   9549 				 * Fiber?
    9550 				 * Should not enter here.
   9551 				 */
   9552 				device_printf(dev, "unknown media (%x)\n",
   9553 				    active);
   9554 				break;
   9555 			}
   9556 			if (active & IFM_FDX)
   9557 				sc->sc_ctrl |= CTRL_FD;
   9558 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9559 		}
   9560 	} else if (sc->sc_type == WM_T_PCH) {
   9561 		wm_k1_gig_workaround_hv(sc,
   9562 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9563 	}
   9564 
   9565 	/*
   9566 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9567 	 * aggressive resulting in many collisions. To avoid this, increase
   9568 	 * the IPG and reduce Rx latency in the PHY.
   9569 	 */
   9570 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9571 	    && link) {
   9572 		uint32_t tipg_reg;
   9573 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9574 		bool fdx;
   9575 		uint16_t emi_addr, emi_val;
   9576 
   9577 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9578 		tipg_reg &= ~TIPG_IPGT_MASK;
   9579 		fdx = status & STATUS_FD;
   9580 
   9581 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9582 			tipg_reg |= 0xff;
   9583 			/* Reduce Rx latency in analog PHY */
   9584 			emi_val = 0;
   9585 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9586 		    fdx && speed != STATUS_SPEED_1000) {
   9587 			tipg_reg |= 0xc;
   9588 			emi_val = 1;
   9589 		} else {
    9590 			/* Restore the default values */
   9591 			tipg_reg |= 0x08;
   9592 			emi_val = 1;
   9593 		}
   9594 
   9595 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9596 
   9597 		rv = sc->phy.acquire(sc);
   9598 		if (rv)
   9599 			return;
   9600 
   9601 		if (sc->sc_type == WM_T_PCH2)
   9602 			emi_addr = I82579_RX_CONFIG;
   9603 		else
   9604 			emi_addr = I217_RX_CONFIG;
   9605 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9606 
   9607 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9608 			uint16_t phy_reg;
   9609 
   9610 			sc->phy.readreg_locked(dev, 2,
   9611 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9612 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9613 			if (speed == STATUS_SPEED_100
   9614 			    || speed == STATUS_SPEED_10)
   9615 				phy_reg |= 0x3e8;
   9616 			else
   9617 				phy_reg |= 0xfa;
   9618 			sc->phy.writereg_locked(dev, 2,
   9619 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9620 
   9621 			if (speed == STATUS_SPEED_1000) {
   9622 				sc->phy.readreg_locked(dev, 2,
   9623 				    HV_PM_CTRL, &phy_reg);
   9624 
   9625 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9626 
   9627 				sc->phy.writereg_locked(dev, 2,
   9628 				    HV_PM_CTRL, phy_reg);
   9629 			}
   9630 		}
   9631 		sc->phy.release(sc);
   9632 
   9633 		if (rv)
   9634 			return;
   9635 
   9636 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9637 			uint16_t data, ptr_gap;
   9638 
   9639 			if (speed == STATUS_SPEED_1000) {
   9640 				rv = sc->phy.acquire(sc);
   9641 				if (rv)
   9642 					return;
   9643 
   9644 				rv = sc->phy.readreg_locked(dev, 2,
   9645 				    I82579_UNKNOWN1, &data);
   9646 				if (rv) {
   9647 					sc->phy.release(sc);
   9648 					return;
   9649 				}
   9650 
   9651 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9652 				if (ptr_gap < 0x18) {
   9653 					data &= ~(0x3ff << 2);
   9654 					data |= (0x18 << 2);
   9655 					rv = sc->phy.writereg_locked(dev,
   9656 					    2, I82579_UNKNOWN1, data);
   9657 				}
   9658 				sc->phy.release(sc);
   9659 				if (rv)
   9660 					return;
   9661 			} else {
   9662 				rv = sc->phy.acquire(sc);
   9663 				if (rv)
   9664 					return;
   9665 
   9666 				rv = sc->phy.writereg_locked(dev, 2,
   9667 				    I82579_UNKNOWN1, 0xc023);
   9668 				sc->phy.release(sc);
   9669 				if (rv)
   9670 					return;
   9671 
   9672 			}
   9673 		}
   9674 	}
   9675 
    9676 	/*
    9677 	 * I217 Packet Loss issue:
    9678 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    9679 	 * on power-up.
    9680 	 * Set the Beacon Duration for I217 to 8 usec.
    9681 	 */
   9682 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9683 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9684 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9685 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9686 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9687 	}
   9688 
   9689 	/* Work-around I218 hang issue */
   9690 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9691 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9692 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9693 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9694 		wm_k1_workaround_lpt_lp(sc, link);
   9695 
   9696 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9697 		/*
   9698 		 * Set platform power management values for Latency
   9699 		 * Tolerance Reporting (LTR)
   9700 		 */
   9701 		wm_platform_pm_pch_lpt(sc,
   9702 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9703 	}
   9704 
   9705 	/* Clear link partner's EEE ability */
   9706 	sc->eee_lp_ability = 0;
   9707 
   9708 	/* FEXTNVM6 K1-off workaround */
   9709 	if (sc->sc_type == WM_T_PCH_SPT) {
   9710 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9711 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9712 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9713 		else
   9714 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9715 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9716 	}
   9717 
   9718 	if (!link)
   9719 		return;
   9720 
   9721 	switch (sc->sc_type) {
   9722 	case WM_T_PCH2:
   9723 		wm_k1_workaround_lv(sc);
   9724 		/* FALLTHROUGH */
   9725 	case WM_T_PCH:
   9726 		if (sc->sc_phytype == WMPHY_82578)
   9727 			wm_link_stall_workaround_hv(sc);
   9728 		break;
   9729 	default:
   9730 		break;
   9731 	}
   9732 
   9733 	/* Enable/Disable EEE after link up */
   9734 	if (sc->sc_phytype > WMPHY_82579)
   9735 		wm_set_eee_pchlan(sc);
   9736 }
   9737 
   9738 /*
   9739  * wm_linkintr_tbi:
   9740  *
   9741  *	Helper; handle link interrupts for TBI mode.
   9742  */
   9743 static void
   9744 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9745 {
   9746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9747 	uint32_t status;
   9748 
   9749 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9750 		__func__));
   9751 
   9752 	status = CSR_READ(sc, WMREG_STATUS);
   9753 	if (icr & ICR_LSC) {
   9754 		wm_check_for_link(sc);
   9755 		if (status & STATUS_LU) {
   9756 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9757 				device_xname(sc->sc_dev),
   9758 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9759 			/*
   9760 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9761 			 * so we should update sc->sc_ctrl
   9762 			 */
   9763 
   9764 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9765 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9766 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9767 			if (status & STATUS_FD)
   9768 				sc->sc_tctl |=
   9769 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9770 			else
   9771 				sc->sc_tctl |=
   9772 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9773 			if (sc->sc_ctrl & CTRL_TFCE)
   9774 				sc->sc_fcrtl |= FCRTL_XONE;
   9775 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9776 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9777 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9778 			sc->sc_tbi_linkup = 1;
   9779 			if_link_state_change(ifp, LINK_STATE_UP);
   9780 		} else {
   9781 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9782 				device_xname(sc->sc_dev)));
   9783 			sc->sc_tbi_linkup = 0;
   9784 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9785 		}
   9786 		/* Update LED */
   9787 		wm_tbi_serdes_set_linkled(sc);
   9788 	} else if (icr & ICR_RXSEQ)
   9789 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9790 			device_xname(sc->sc_dev)));
   9791 }
   9792 
   9793 /*
   9794  * wm_linkintr_serdes:
   9795  *
    9796  *	Helper; handle link interrupts for SERDES mode.
   9797  */
   9798 static void
   9799 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9800 {
   9801 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9802 	struct mii_data *mii = &sc->sc_mii;
   9803 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9804 	uint32_t pcs_adv, pcs_lpab, reg;
   9805 
   9806 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9807 		__func__));
   9808 
   9809 	if (icr & ICR_LSC) {
   9810 		/* Check PCS */
   9811 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9812 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9813 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9814 				device_xname(sc->sc_dev)));
   9815 			mii->mii_media_status |= IFM_ACTIVE;
   9816 			sc->sc_tbi_linkup = 1;
   9817 			if_link_state_change(ifp, LINK_STATE_UP);
   9818 		} else {
   9819 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9820 				device_xname(sc->sc_dev)));
   9821 			mii->mii_media_status |= IFM_NONE;
   9822 			sc->sc_tbi_linkup = 0;
   9823 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9824 			wm_tbi_serdes_set_linkled(sc);
   9825 			return;
   9826 		}
   9827 		mii->mii_media_active |= IFM_1000_SX;
   9828 		if ((reg & PCS_LSTS_FDX) != 0)
   9829 			mii->mii_media_active |= IFM_FDX;
   9830 		else
   9831 			mii->mii_media_active |= IFM_HDX;
   9832 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9833 			/* Check flow */
   9834 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9835 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9836 				DPRINTF(sc, WM_DEBUG_LINK,
   9837 				    ("XXX LINKOK but not ACOMP\n"));
   9838 				return;
   9839 			}
   9840 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9841 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9842 			DPRINTF(sc, WM_DEBUG_LINK,
   9843 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9844 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9845 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9846 				mii->mii_media_active |= IFM_FLOW
   9847 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9848 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9849 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9850 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9851 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9852 				mii->mii_media_active |= IFM_FLOW
   9853 				    | IFM_ETH_TXPAUSE;
   9854 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9855 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9856 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9857 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9858 				mii->mii_media_active |= IFM_FLOW
   9859 				    | IFM_ETH_RXPAUSE;
   9860 		}
   9861 		/* Update LED */
   9862 		wm_tbi_serdes_set_linkled(sc);
   9863 	} else
   9864 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9865 		    device_xname(sc->sc_dev)));
   9866 }
   9867 
   9868 /*
   9869  * wm_linkintr:
   9870  *
   9871  *	Helper; handle link interrupts.
   9872  */
   9873 static void
   9874 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9875 {
   9876 
   9877 	KASSERT(WM_CORE_LOCKED(sc));
   9878 
   9879 	if (sc->sc_flags & WM_F_HAS_MII)
   9880 		wm_linkintr_gmii(sc, icr);
   9881 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9882 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9883 		wm_linkintr_serdes(sc, icr);
   9884 	else
   9885 		wm_linkintr_tbi(sc, icr);
   9886 }
   9887 
   9888 
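/*
 * wm_sched_handle_queue:
 *
 *	Defer Tx/Rx processing for a queue to either the per-device
 *	workqueue or a softint, depending on the queue's configuration.
 */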
   9889 static inline void
   9890 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9891 {
   9892 
   9893 	if (wmq->wmq_txrx_use_workqueue)
   9894 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9895 	else
   9896 		softint_schedule(wmq->wmq_si);
   9897 }
   9898 
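/*
 * wm_legacy_intr_disable:
 *
 *	Mask all interrupts by writing the Interrupt Mask Clear register.
 */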
   9899 static inline void
   9900 wm_legacy_intr_disable(struct wm_softc *sc)
   9901 {
   9902 
   9903 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   9904 }
   9905 
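/*
 * wm_legacy_intr_enable:
 *
 *	Unmask the interrupts selected in sc_icr via the Interrupt Mask
 *	Set register.
 */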
   9906 static inline void
   9907 wm_legacy_intr_enable(struct wm_softc *sc)
   9908 {
   9909 
   9910 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   9911 }
   9912 
   9913 /*
   9914  * wm_intr_legacy:
   9915  *
   9916  *	Interrupt service routine for INTx and MSI.
   9917  */
   9918 static int
   9919 wm_intr_legacy(void *arg)
   9920 {
   9921 	struct wm_softc *sc = arg;
   9922 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9923 	struct wm_queue *wmq = &sc->sc_queue[0];
   9924 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9925 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9926 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9927 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9928 	uint32_t icr, rndval = 0;
   9929 	bool more = false;
   9930 
   9931 	icr = CSR_READ(sc, WMREG_ICR);
   9932 	if ((icr & sc->sc_icr) == 0)
   9933 		return 0;
   9934 
    9935 	DPRINTF(sc, WM_DEBUG_TX,
    9936 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
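	/* Use the ICR value as an entropy sample; it is fed to rnd(9) below. */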
   9937 	if (rndval == 0)
   9938 		rndval = icr;
   9939 
   9940 	mutex_enter(rxq->rxq_lock);
   9941 
   9942 	if (rxq->rxq_stopping) {
   9943 		mutex_exit(rxq->rxq_lock);
   9944 		return 1;
   9945 	}
   9946 
   9947 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9948 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9949 		DPRINTF(sc, WM_DEBUG_RX,
   9950 		    ("%s: RX: got Rx intr 0x%08x\n",
   9951 			device_xname(sc->sc_dev),
   9952 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   9953 		WM_Q_EVCNT_INCR(rxq, intr);
   9954 	}
   9955 #endif
   9956 	/*
   9957 	 * wm_rxeof() does *not* call upper layer functions directly,
    9958 	 * because if_percpuq_enqueue() just calls softint_schedule().
   9959 	 * So, we can call wm_rxeof() in interrupt context.
   9960 	 */
   9961 	more = wm_rxeof(rxq, rxlimit);
   9962 
   9963 	mutex_exit(rxq->rxq_lock);
   9964 	mutex_enter(txq->txq_lock);
   9965 
   9966 	if (txq->txq_stopping) {
   9967 		mutex_exit(txq->txq_lock);
   9968 		return 1;
   9969 	}
   9970 
   9971 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9972 	if (icr & ICR_TXDW) {
   9973 		DPRINTF(sc, WM_DEBUG_TX,
   9974 		    ("%s: TX: got TXDW interrupt\n",
   9975 			device_xname(sc->sc_dev)));
   9976 		WM_Q_EVCNT_INCR(txq, txdw);
   9977 	}
   9978 #endif
   9979 	more |= wm_txeof(txq, txlimit);
   9980 	if (!IF_IS_EMPTY(&ifp->if_snd))
   9981 		more = true;
   9982 
   9983 	mutex_exit(txq->txq_lock);
   9984 	WM_CORE_LOCK(sc);
   9985 
   9986 	if (sc->sc_core_stopping) {
   9987 		WM_CORE_UNLOCK(sc);
   9988 		return 1;
   9989 	}
   9990 
   9991 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9992 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9993 		wm_linkintr(sc, icr);
   9994 	}
   9995 	if ((icr & ICR_GPI(0)) != 0)
   9996 		device_printf(sc->sc_dev, "got module interrupt\n");
   9997 
   9998 	WM_CORE_UNLOCK(sc);
   9999 
   10000 	if (icr & ICR_RXO) {
   10001 #if defined(WM_DEBUG)
   10002 		log(LOG_WARNING, "%s: Receive overrun\n",
   10003 		    device_xname(sc->sc_dev));
   10004 #endif /* defined(WM_DEBUG) */
   10005 	}
   10006 
   10007 	rnd_add_uint32(&sc->rnd_source, rndval);
   10008 
   10009 	if (more) {
   10010 		/* Try to get more packets going. */
   10011 		wm_legacy_intr_disable(sc);
   10012 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10013 		wm_sched_handle_queue(sc, wmq);
   10014 	}
   10015 
   10016 	return 1;
   10017 }
   10018 
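/*
 * wm_txrxintr_disable:
 *
 *	Mask this queue's Tx/Rx interrupt while it is being serviced
 *	(all interrupts when not using MSI-X).
 */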
   10019 static inline void
   10020 wm_txrxintr_disable(struct wm_queue *wmq)
   10021 {
   10022 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10023 
   10024 	if (__predict_false(!wm_is_using_msix(sc))) {
   10025 		return wm_legacy_intr_disable(sc);
   10026 	}
   10027 
   10028 	if (sc->sc_type == WM_T_82574)
   10029 		CSR_WRITE(sc, WMREG_IMC,
   10030 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10031 	else if (sc->sc_type == WM_T_82575)
   10032 		CSR_WRITE(sc, WMREG_EIMC,
   10033 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10034 	else
   10035 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10036 }
   10037 
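/*
 * wm_txrxintr_enable:
 *
 *	Recalculate the interrupt throttling rate and unmask this queue's
 *	Tx/Rx interrupt.
 */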
   10038 static inline void
   10039 wm_txrxintr_enable(struct wm_queue *wmq)
   10040 {
   10041 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10042 
   10043 	wm_itrs_calculate(sc, wmq);
   10044 
   10045 	if (__predict_false(!wm_is_using_msix(sc))) {
   10046 		return wm_legacy_intr_enable(sc);
   10047 	}
   10048 
    10049 	/*
    10050 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
    10051 	 * here. There is no need to care whether RXQ(0) or RXQ(1) enables
    10052 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    10053 	 * while its wm_handle_queue(wmq) is running.
    10054 	 */
   10055 	if (sc->sc_type == WM_T_82574)
   10056 		CSR_WRITE(sc, WMREG_IMS,
   10057 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10058 	else if (sc->sc_type == WM_T_82575)
   10059 		CSR_WRITE(sc, WMREG_EIMS,
   10060 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10061 	else
   10062 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10063 }
   10064 
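/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx MSI-X vector of one queue.
 */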
   10065 static int
   10066 wm_txrxintr_msix(void *arg)
   10067 {
   10068 	struct wm_queue *wmq = arg;
   10069 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10070 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10071 	struct wm_softc *sc = txq->txq_sc;
   10072 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10073 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10074 	bool txmore;
   10075 	bool rxmore;
   10076 
   10077 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10078 
   10079 	DPRINTF(sc, WM_DEBUG_TX,
   10080 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10081 
   10082 	wm_txrxintr_disable(wmq);
   10083 
   10084 	mutex_enter(txq->txq_lock);
   10085 
   10086 	if (txq->txq_stopping) {
   10087 		mutex_exit(txq->txq_lock);
   10088 		return 1;
   10089 	}
   10090 
   10091 	WM_Q_EVCNT_INCR(txq, txdw);
   10092 	txmore = wm_txeof(txq, txlimit);
   10093 	/* wm_deferred start() is done in wm_handle_queue(). */
   10094 	mutex_exit(txq->txq_lock);
   10095 
   10096 	DPRINTF(sc, WM_DEBUG_RX,
   10097 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10098 	mutex_enter(rxq->rxq_lock);
   10099 
   10100 	if (rxq->rxq_stopping) {
   10101 		mutex_exit(rxq->rxq_lock);
   10102 		return 1;
   10103 	}
   10104 
   10105 	WM_Q_EVCNT_INCR(rxq, intr);
   10106 	rxmore = wm_rxeof(rxq, rxlimit);
   10107 	mutex_exit(rxq->rxq_lock);
   10108 
   10109 	wm_itrs_writereg(sc, wmq);
   10110 
   10111 	if (txmore || rxmore) {
   10112 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10113 		wm_sched_handle_queue(sc, wmq);
   10114 	} else
   10115 		wm_txrxintr_enable(wmq);
   10116 
   10117 	return 1;
   10118 }
   10119 
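/*
 * wm_handle_queue:
 *
 *	Softint/workqueue handler which continues deferred Tx/Rx
 *	processing for a queue, rescheduling itself while work remains.
 */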
   10120 static void
   10121 wm_handle_queue(void *arg)
   10122 {
   10123 	struct wm_queue *wmq = arg;
   10124 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10125 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10126 	struct wm_softc *sc = txq->txq_sc;
   10127 	u_int txlimit = sc->sc_tx_process_limit;
   10128 	u_int rxlimit = sc->sc_rx_process_limit;
   10129 	bool txmore;
   10130 	bool rxmore;
   10131 
   10132 	mutex_enter(txq->txq_lock);
   10133 	if (txq->txq_stopping) {
   10134 		mutex_exit(txq->txq_lock);
   10135 		return;
   10136 	}
   10137 	txmore = wm_txeof(txq, txlimit);
   10138 	wm_deferred_start_locked(txq);
   10139 	mutex_exit(txq->txq_lock);
   10140 
   10141 	mutex_enter(rxq->rxq_lock);
   10142 	if (rxq->rxq_stopping) {
   10143 		mutex_exit(rxq->rxq_lock);
   10144 		return;
   10145 	}
   10146 	WM_Q_EVCNT_INCR(rxq, defer);
   10147 	rxmore = wm_rxeof(rxq, rxlimit);
   10148 	mutex_exit(rxq->rxq_lock);
   10149 
   10150 	if (txmore || rxmore) {
   10151 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10152 		wm_sched_handle_queue(sc, wmq);
   10153 	} else
   10154 		wm_txrxintr_enable(wmq);
   10155 }
   10156 
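/*
 * wm_handle_queue_work:
 *
 *	Workqueue entry point; simply wraps wm_handle_queue().
 */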
   10157 static void
   10158 wm_handle_queue_work(struct work *wk, void *context)
   10159 {
   10160 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10161 
    10162 	/* An "enqueued" flag is not required here. */
   10165 	wm_handle_queue(wmq);
   10166 }
   10167 
   10168 /*
   10169  * wm_linkintr_msix:
   10170  *
   10171  *	Interrupt service routine for link status change for MSI-X.
   10172  */
   10173 static int
   10174 wm_linkintr_msix(void *arg)
   10175 {
   10176 	struct wm_softc *sc = arg;
   10177 	uint32_t reg;
    10178 	bool has_rxo = false;	/* May be read via "out:" before being set. */
   10179 
   10180 	reg = CSR_READ(sc, WMREG_ICR);
   10181 	WM_CORE_LOCK(sc);
   10182 	DPRINTF(sc, WM_DEBUG_LINK,
   10183 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10184 		device_xname(sc->sc_dev), reg));
   10185 
   10186 	if (sc->sc_core_stopping)
   10187 		goto out;
   10188 
   10189 	if ((reg & ICR_LSC) != 0) {
   10190 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10191 		wm_linkintr(sc, ICR_LSC);
   10192 	}
   10193 	if ((reg & ICR_GPI(0)) != 0)
   10194 		device_printf(sc->sc_dev, "got module interrupt\n");
   10195 
    10196 	/*
    10197 	 * XXX 82574 MSI-X mode workaround
    10198 	 *
    10199 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises only the
    10200 	 * ICR_OTHER MSI-X vector, and neither the ICR_RXQ(0) nor the
    10201 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    10202 	 * interrupts by writing WMREG_ICS to process received packets.
    10203 	 */
   10204 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10205 #if defined(WM_DEBUG)
   10206 		log(LOG_WARNING, "%s: Receive overrun\n",
   10207 		    device_xname(sc->sc_dev));
   10208 #endif /* defined(WM_DEBUG) */
   10209 
   10210 		has_rxo = true;
    10211 		/*
    10212 		 * The RXO interrupt rate is very high when the receive
    10213 		 * traffic rate is high. Use polling mode for ICR_OTHER, as
    10214 		 * for the Tx/Rx interrupts. ICR_OTHER will be re-enabled at
    10215 		 * the end of wm_txrxintr_msix(), which is kicked by both the
    10216 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
    10217 		 */
   10218 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10219 
   10220 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10221 	}
    10222 
   10225 out:
   10226 	WM_CORE_UNLOCK(sc);
   10227 
   10228 	if (sc->sc_type == WM_T_82574) {
   10229 		if (!has_rxo)
   10230 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10231 		else
   10232 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10233 	} else if (sc->sc_type == WM_T_82575)
   10234 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10235 	else
   10236 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10237 
   10238 	return 1;
   10239 }
   10240 
   10241 /*
   10242  * Media related.
   10243  * GMII, SGMII, TBI (and SERDES)
   10244  */
   10245 
   10246 /* Common */
   10247 
   10248 /*
   10249  * wm_tbi_serdes_set_linkled:
   10250  *
   10251  *	Update the link LED on TBI and SERDES devices.
   10252  */
   10253 static void
   10254 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10255 {
   10256 
   10257 	if (sc->sc_tbi_linkup)
   10258 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10259 	else
   10260 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10261 
   10262 	/* 82540 or newer devices are active low */
   10263 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10264 
   10265 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10266 }
   10267 
   10268 /* GMII related */
   10269 
   10270 /*
   10271  * wm_gmii_reset:
   10272  *
   10273  *	Reset the PHY.
   10274  */
   10275 static void
   10276 wm_gmii_reset(struct wm_softc *sc)
   10277 {
   10278 	uint32_t reg;
   10279 	int rv;
   10280 
   10281 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10282 		device_xname(sc->sc_dev), __func__));
   10283 
   10284 	rv = sc->phy.acquire(sc);
   10285 	if (rv != 0) {
   10286 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10287 		    __func__);
   10288 		return;
   10289 	}
   10290 
   10291 	switch (sc->sc_type) {
   10292 	case WM_T_82542_2_0:
   10293 	case WM_T_82542_2_1:
   10294 		/* null */
   10295 		break;
   10296 	case WM_T_82543:
   10297 		/*
   10298 		 * With 82543, we need to force speed and duplex on the MAC
   10299 		 * equal to what the PHY speed and duplex configuration is.
   10300 		 * In addition, we need to perform a hardware reset on the PHY
   10301 		 * to take it out of reset.
   10302 		 */
   10303 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10304 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10305 
   10306 		/* The PHY reset pin is active-low. */
   10307 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10308 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10309 		    CTRL_EXT_SWDPIN(4));
   10310 		reg |= CTRL_EXT_SWDPIO(4);
   10311 
   10312 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10313 		CSR_WRITE_FLUSH(sc);
   10314 		delay(10*1000);
   10315 
   10316 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10317 		CSR_WRITE_FLUSH(sc);
   10318 		delay(150);
   10319 #if 0
   10320 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10321 #endif
   10322 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10323 		break;
   10324 	case WM_T_82544:	/* Reset 10000us */
   10325 	case WM_T_82540:
   10326 	case WM_T_82545:
   10327 	case WM_T_82545_3:
   10328 	case WM_T_82546:
   10329 	case WM_T_82546_3:
   10330 	case WM_T_82541:
   10331 	case WM_T_82541_2:
   10332 	case WM_T_82547:
   10333 	case WM_T_82547_2:
   10334 	case WM_T_82571:	/* Reset 100us */
   10335 	case WM_T_82572:
   10336 	case WM_T_82573:
   10337 	case WM_T_82574:
   10338 	case WM_T_82575:
   10339 	case WM_T_82576:
   10340 	case WM_T_82580:
   10341 	case WM_T_I350:
   10342 	case WM_T_I354:
   10343 	case WM_T_I210:
   10344 	case WM_T_I211:
   10345 	case WM_T_82583:
   10346 	case WM_T_80003:
   10347 		/* Generic reset */
   10348 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10349 		CSR_WRITE_FLUSH(sc);
   10350 		delay(20000);
   10351 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10352 		CSR_WRITE_FLUSH(sc);
   10353 		delay(20000);
   10354 
   10355 		if ((sc->sc_type == WM_T_82541)
   10356 		    || (sc->sc_type == WM_T_82541_2)
   10357 		    || (sc->sc_type == WM_T_82547)
   10358 		    || (sc->sc_type == WM_T_82547_2)) {
    10359 			/* Workarounds for IGP are done in igp_reset() */
   10360 			/* XXX add code to set LED after phy reset */
   10361 		}
   10362 		break;
   10363 	case WM_T_ICH8:
   10364 	case WM_T_ICH9:
   10365 	case WM_T_ICH10:
   10366 	case WM_T_PCH:
   10367 	case WM_T_PCH2:
   10368 	case WM_T_PCH_LPT:
   10369 	case WM_T_PCH_SPT:
   10370 	case WM_T_PCH_CNP:
   10371 		/* Generic reset */
   10372 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10373 		CSR_WRITE_FLUSH(sc);
   10374 		delay(100);
   10375 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10376 		CSR_WRITE_FLUSH(sc);
   10377 		delay(150);
   10378 		break;
   10379 	default:
   10380 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10381 		    __func__);
   10382 		break;
   10383 	}
   10384 
   10385 	sc->phy.release(sc);
   10386 
   10387 	/* get_cfg_done */
   10388 	wm_get_cfg_done(sc);
   10389 
   10390 	/* Extra setup */
   10391 	switch (sc->sc_type) {
   10392 	case WM_T_82542_2_0:
   10393 	case WM_T_82542_2_1:
   10394 	case WM_T_82543:
   10395 	case WM_T_82544:
   10396 	case WM_T_82540:
   10397 	case WM_T_82545:
   10398 	case WM_T_82545_3:
   10399 	case WM_T_82546:
   10400 	case WM_T_82546_3:
   10401 	case WM_T_82541_2:
   10402 	case WM_T_82547_2:
   10403 	case WM_T_82571:
   10404 	case WM_T_82572:
   10405 	case WM_T_82573:
   10406 	case WM_T_82574:
   10407 	case WM_T_82583:
   10408 	case WM_T_82575:
   10409 	case WM_T_82576:
   10410 	case WM_T_82580:
   10411 	case WM_T_I350:
   10412 	case WM_T_I354:
   10413 	case WM_T_I210:
   10414 	case WM_T_I211:
   10415 	case WM_T_80003:
   10416 		/* Null */
   10417 		break;
   10418 	case WM_T_82541:
   10419 	case WM_T_82547:
    10420 		/* XXX Configure the activity LED after PHY reset */
   10421 		break;
   10422 	case WM_T_ICH8:
   10423 	case WM_T_ICH9:
   10424 	case WM_T_ICH10:
   10425 	case WM_T_PCH:
   10426 	case WM_T_PCH2:
   10427 	case WM_T_PCH_LPT:
   10428 	case WM_T_PCH_SPT:
   10429 	case WM_T_PCH_CNP:
   10430 		wm_phy_post_reset(sc);
   10431 		break;
   10432 	default:
   10433 		panic("%s: unknown type\n", __func__);
   10434 		break;
   10435 	}
   10436 }
   10437 
    10438 /*
    10439  * Set up sc_phytype and mii_{read|write}reg.
    10440  *
    10441  *  To identify the PHY type, the correct read/write function must be
    10442  * selected. To select it, the PCI ID or MAC type is required, without
    10443  * accessing any PHY registers.
    10444  *
    10445  *  On the first call of this function, the PHY ID is not known yet, so
    10446  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    10447  * so the result might be incorrect.
    10448  *
    10449  *  On the second call, the PHY OUI and model are used to identify the
    10450  * PHY type. That might still not be perfect because of missing table
    10451  * entries, but it is better than the first call.
    10452  *
    10453  *  If the newly detected result differs from the previous assumption,
    10454  * a diagnostic message is printed.
    10455  */
   10456 static void
   10457 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10458     uint16_t phy_model)
   10459 {
   10460 	device_t dev = sc->sc_dev;
   10461 	struct mii_data *mii = &sc->sc_mii;
   10462 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10463 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10464 	mii_readreg_t new_readreg;
   10465 	mii_writereg_t new_writereg;
   10466 	bool dodiag = true;
   10467 
   10468 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10469 		device_xname(sc->sc_dev), __func__));
   10470 
    10471 	/*
    10472 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is
    10473 	 * always incorrect, so don't print diag output on the second call.
    10474 	 */
   10475 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10476 		dodiag = false;
   10477 
   10478 	if (mii->mii_readreg == NULL) {
   10479 		/*
   10480 		 *  This is the first call of this function. For ICH and PCH
   10481 		 * variants, it's difficult to determine the PHY access method
   10482 		 * by sc_type, so use the PCI product ID for some devices.
   10483 		 */
   10484 
   10485 		switch (sc->sc_pcidevid) {
   10486 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10487 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10488 			/* 82577 */
   10489 			new_phytype = WMPHY_82577;
   10490 			break;
   10491 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10492 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10493 			/* 82578 */
   10494 			new_phytype = WMPHY_82578;
   10495 			break;
   10496 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10497 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10498 			/* 82579 */
   10499 			new_phytype = WMPHY_82579;
   10500 			break;
   10501 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10502 		case PCI_PRODUCT_INTEL_82801I_BM:
   10503 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10504 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10505 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10506 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10507 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10508 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10509 			/* ICH8, 9, 10 with 82567 */
   10510 			new_phytype = WMPHY_BM;
   10511 			break;
   10512 		default:
   10513 			break;
   10514 		}
   10515 	} else {
   10516 		/* It's not the first call. Use PHY OUI and model */
   10517 		switch (phy_oui) {
   10518 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10519 			switch (phy_model) {
   10520 			case MII_MODEL_ATTANSIC_AR8021:
   10521 				new_phytype = WMPHY_82578;
   10522 				break;
   10523 			default:
   10524 				break;
   10525 			}
   10526 			break;
   10527 		case MII_OUI_xxMARVELL:
   10528 			switch (phy_model) {
   10529 			case MII_MODEL_xxMARVELL_I210:
   10530 				new_phytype = WMPHY_I210;
   10531 				break;
   10532 			case MII_MODEL_xxMARVELL_E1011:
   10533 			case MII_MODEL_xxMARVELL_E1000_3:
   10534 			case MII_MODEL_xxMARVELL_E1000_5:
   10535 			case MII_MODEL_xxMARVELL_E1112:
   10536 				new_phytype = WMPHY_M88;
   10537 				break;
   10538 			case MII_MODEL_xxMARVELL_E1149:
   10539 				new_phytype = WMPHY_BM;
   10540 				break;
   10541 			case MII_MODEL_xxMARVELL_E1111:
   10542 			case MII_MODEL_xxMARVELL_I347:
   10543 			case MII_MODEL_xxMARVELL_E1512:
   10544 			case MII_MODEL_xxMARVELL_E1340M:
   10545 			case MII_MODEL_xxMARVELL_E1543:
   10546 				new_phytype = WMPHY_M88;
   10547 				break;
   10548 			case MII_MODEL_xxMARVELL_I82563:
   10549 				new_phytype = WMPHY_GG82563;
   10550 				break;
   10551 			default:
   10552 				break;
   10553 			}
   10554 			break;
   10555 		case MII_OUI_INTEL:
   10556 			switch (phy_model) {
   10557 			case MII_MODEL_INTEL_I82577:
   10558 				new_phytype = WMPHY_82577;
   10559 				break;
   10560 			case MII_MODEL_INTEL_I82579:
   10561 				new_phytype = WMPHY_82579;
   10562 				break;
   10563 			case MII_MODEL_INTEL_I217:
   10564 				new_phytype = WMPHY_I217;
   10565 				break;
   10566 			case MII_MODEL_INTEL_I82580:
   10567 				new_phytype = WMPHY_82580;
   10568 				break;
   10569 			case MII_MODEL_INTEL_I350:
   10570 				new_phytype = WMPHY_I350;
   10571 				break;
   10573 			default:
   10574 				break;
   10575 			}
   10576 			break;
   10577 		case MII_OUI_yyINTEL:
   10578 			switch (phy_model) {
   10579 			case MII_MODEL_yyINTEL_I82562G:
   10580 			case MII_MODEL_yyINTEL_I82562EM:
   10581 			case MII_MODEL_yyINTEL_I82562ET:
   10582 				new_phytype = WMPHY_IFE;
   10583 				break;
   10584 			case MII_MODEL_yyINTEL_IGP01E1000:
   10585 				new_phytype = WMPHY_IGP;
   10586 				break;
   10587 			case MII_MODEL_yyINTEL_I82566:
   10588 				new_phytype = WMPHY_IGP_3;
   10589 				break;
   10590 			default:
   10591 				break;
   10592 			}
   10593 			break;
   10594 		default:
   10595 			break;
   10596 		}
   10597 
   10598 		if (dodiag) {
   10599 			if (new_phytype == WMPHY_UNKNOWN)
   10600 				aprint_verbose_dev(dev,
   10601 				    "%s: Unknown PHY model. OUI=%06x, "
   10602 				    "model=%04x\n", __func__, phy_oui,
   10603 				    phy_model);
   10604 
   10605 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10606 			    && (sc->sc_phytype != new_phytype)) {
   10607 				aprint_error_dev(dev, "Previously assumed PHY "
    10608 				    "type(%u) was incorrect. PHY type from PHY "
   10609 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10610 			}
   10611 		}
   10612 	}
   10613 
   10614 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10615 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10616 		/* SGMII */
   10617 		new_readreg = wm_sgmii_readreg;
   10618 		new_writereg = wm_sgmii_writereg;
   10619 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10620 		/* BM2 (phyaddr == 1) */
   10621 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10622 		    && (new_phytype != WMPHY_BM)
   10623 		    && (new_phytype != WMPHY_UNKNOWN))
   10624 			doubt_phytype = new_phytype;
   10625 		new_phytype = WMPHY_BM;
   10626 		new_readreg = wm_gmii_bm_readreg;
   10627 		new_writereg = wm_gmii_bm_writereg;
   10628 	} else if (sc->sc_type >= WM_T_PCH) {
   10629 		/* All PCH* use _hv_ */
   10630 		new_readreg = wm_gmii_hv_readreg;
   10631 		new_writereg = wm_gmii_hv_writereg;
   10632 	} else if (sc->sc_type >= WM_T_ICH8) {
   10633 		/* non-82567 ICH8, 9 and 10 */
   10634 		new_readreg = wm_gmii_i82544_readreg;
   10635 		new_writereg = wm_gmii_i82544_writereg;
   10636 	} else if (sc->sc_type >= WM_T_80003) {
   10637 		/* 80003 */
   10638 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10639 		    && (new_phytype != WMPHY_GG82563)
   10640 		    && (new_phytype != WMPHY_UNKNOWN))
   10641 			doubt_phytype = new_phytype;
   10642 		new_phytype = WMPHY_GG82563;
   10643 		new_readreg = wm_gmii_i80003_readreg;
   10644 		new_writereg = wm_gmii_i80003_writereg;
   10645 	} else if (sc->sc_type >= WM_T_I210) {
   10646 		/* I210 and I211 */
   10647 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10648 		    && (new_phytype != WMPHY_I210)
   10649 		    && (new_phytype != WMPHY_UNKNOWN))
   10650 			doubt_phytype = new_phytype;
   10651 		new_phytype = WMPHY_I210;
   10652 		new_readreg = wm_gmii_gs40g_readreg;
   10653 		new_writereg = wm_gmii_gs40g_writereg;
   10654 	} else if (sc->sc_type >= WM_T_82580) {
   10655 		/* 82580, I350 and I354 */
   10656 		new_readreg = wm_gmii_82580_readreg;
   10657 		new_writereg = wm_gmii_82580_writereg;
   10658 	} else if (sc->sc_type >= WM_T_82544) {
    10659 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10660 		new_readreg = wm_gmii_i82544_readreg;
   10661 		new_writereg = wm_gmii_i82544_writereg;
   10662 	} else {
   10663 		new_readreg = wm_gmii_i82543_readreg;
   10664 		new_writereg = wm_gmii_i82543_writereg;
   10665 	}
   10666 
   10667 	if (new_phytype == WMPHY_BM) {
   10668 		/* All BM use _bm_ */
   10669 		new_readreg = wm_gmii_bm_readreg;
   10670 		new_writereg = wm_gmii_bm_writereg;
   10671 	}
   10672 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10673 		/* All PCH* use _hv_ */
   10674 		new_readreg = wm_gmii_hv_readreg;
   10675 		new_writereg = wm_gmii_hv_writereg;
   10676 	}
   10677 
   10678 	/* Diag output */
   10679 	if (dodiag) {
   10680 		if (doubt_phytype != WMPHY_UNKNOWN)
   10681 			aprint_error_dev(dev, "Assumed new PHY type was "
   10682 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10683 			    new_phytype);
   10684 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10685 		    && (sc->sc_phytype != new_phytype))
   10686 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    10687 			    " was incorrect. New PHY type = %u\n",
   10688 			    sc->sc_phytype, new_phytype);
   10689 
   10690 		if ((mii->mii_readreg != NULL) &&
   10691 		    (new_phytype == WMPHY_UNKNOWN))
   10692 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10693 
   10694 		if ((mii->mii_readreg != NULL) &&
   10695 		    (mii->mii_readreg != new_readreg))
   10696 			aprint_error_dev(dev, "Previously assumed PHY "
   10697 			    "read/write function was incorrect.\n");
   10698 	}
   10699 
   10700 	/* Update now */
   10701 	sc->sc_phytype = new_phytype;
   10702 	mii->mii_readreg = new_readreg;
   10703 	mii->mii_writereg = new_writereg;
   10704 	if (new_readreg == wm_gmii_hv_readreg) {
   10705 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10706 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10707 	} else if (new_readreg == wm_sgmii_readreg) {
   10708 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10709 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10710 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10711 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10712 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10713 	}
   10714 }
   10715 
   10716 /*
   10717  * wm_get_phy_id_82575:
   10718  *
    10719  *	Return the PHY ID, or -1 on failure.
   10720  */
   10721 static int
   10722 wm_get_phy_id_82575(struct wm_softc *sc)
   10723 {
   10724 	uint32_t reg;
   10725 	int phyid = -1;
   10726 
   10727 	/* XXX */
   10728 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10729 		return -1;
   10730 
   10731 	if (wm_sgmii_uses_mdio(sc)) {
   10732 		switch (sc->sc_type) {
   10733 		case WM_T_82575:
   10734 		case WM_T_82576:
   10735 			reg = CSR_READ(sc, WMREG_MDIC);
   10736 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10737 			break;
   10738 		case WM_T_82580:
   10739 		case WM_T_I350:
   10740 		case WM_T_I354:
   10741 		case WM_T_I210:
   10742 		case WM_T_I211:
   10743 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10744 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10745 			break;
   10746 		default:
   10747 			return -1;
   10748 		}
   10749 	}
   10750 
   10751 	return phyid;
   10752 }
   10753 
   10754 /*
   10755  * wm_gmii_mediainit:
   10756  *
   10757  *	Initialize media for use on 1000BASE-T devices.
   10758  */
   10759 static void
   10760 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10761 {
   10762 	device_t dev = sc->sc_dev;
   10763 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10764 	struct mii_data *mii = &sc->sc_mii;
   10765 
   10766 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10767 		device_xname(sc->sc_dev), __func__));
   10768 
   10769 	/* We have GMII. */
   10770 	sc->sc_flags |= WM_F_HAS_MII;
   10771 
   10772 	if (sc->sc_type == WM_T_80003)
   10773 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10774 	else
   10775 		sc->sc_tipg = TIPG_1000T_DFLT;
   10776 
   10777 	/*
   10778 	 * Let the chip set speed/duplex on its own based on
   10779 	 * signals from the PHY.
   10780 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10781 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10782 	 */
   10783 	sc->sc_ctrl |= CTRL_SLU;
   10784 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10785 
   10786 	/* Initialize our media structures and probe the GMII. */
   10787 	mii->mii_ifp = ifp;
   10788 
   10789 	mii->mii_statchg = wm_gmii_statchg;
   10790 
   10791 	/* get PHY control from SMBus to PCIe */
   10792 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10793 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10794 	    || (sc->sc_type == WM_T_PCH_CNP))
   10795 		wm_init_phy_workarounds_pchlan(sc);
   10796 
   10797 	wm_gmii_reset(sc);
   10798 
   10799 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10800 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10801 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10802 
   10803 	/* Setup internal SGMII PHY for SFP */
   10804 	wm_sgmii_sfp_preconfig(sc);
   10805 
   10806 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10807 	    || (sc->sc_type == WM_T_82580)
   10808 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10809 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10810 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10811 			/* Attach only one port */
   10812 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10813 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10814 		} else {
   10815 			int i, id;
   10816 			uint32_t ctrl_ext;
   10817 
   10818 			id = wm_get_phy_id_82575(sc);
   10819 			if (id != -1) {
   10820 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10821 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10822 			}
   10823 			if ((id == -1)
   10824 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10825 				/* Power on the SGMII PHY if it is disabled */
   10826 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10827 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10828 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10829 				CSR_WRITE_FLUSH(sc);
   10830 				delay(300*1000); /* XXX too long */
   10831 
    10832 				/*
    10833 				 * Scan PHY addresses 1 through 7.
    10834 				 *
    10835 				 * I2C access can fail with the I2C register's
    10836 				 * ERROR bit set, so suppress the error message
    10837 				 * while scanning.
    10838 				 */
   10839 				sc->phy.no_errprint = true;
   10840 				for (i = 1; i < 8; i++)
   10841 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10842 					    0xffffffff, i, MII_OFFSET_ANY,
   10843 					    MIIF_DOPAUSE);
   10844 				sc->phy.no_errprint = false;
   10845 
   10846 				/* Restore previous sfp cage power state */
   10847 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10848 			}
   10849 		}
   10850 	} else
   10851 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10852 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10853 
    10854 	/*
    10855 	 * If the MAC is PCH2 or newer and we failed to detect the MII PHY,
    10856 	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
    10857 	 */
   10858 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10859 		|| (sc->sc_type == WM_T_PCH_SPT)
   10860 		|| (sc->sc_type == WM_T_PCH_CNP))
   10861 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10862 		wm_set_mdio_slow_mode_hv(sc);
   10863 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10864 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10865 	}
   10866 
   10867 	/*
   10868 	 * (For ICH8 variants)
   10869 	 * If PHY detection failed, use BM's r/w function and retry.
   10870 	 */
   10871 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10872 		/* if failed, retry with *_bm_* */
   10873 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10874 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10875 		    sc->sc_phytype);
   10876 		sc->sc_phytype = WMPHY_BM;
   10877 		mii->mii_readreg = wm_gmii_bm_readreg;
   10878 		mii->mii_writereg = wm_gmii_bm_writereg;
   10879 
   10880 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10881 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10882 	}
   10883 
   10884 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10885 		/* No PHY was found */
   10886 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10887 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10888 		sc->sc_phytype = WMPHY_NONE;
   10889 	} else {
   10890 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10891 
    10892 		/*
    10893 		 * PHY found! Check the PHY type again with the second call
    10894 		 * of wm_gmii_setup_phytype().
    10895 		 */
   10896 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10897 		    child->mii_mpd_model);
   10898 
   10899 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10900 	}
   10901 }
   10902 
   10903 /*
   10904  * wm_gmii_mediachange:	[ifmedia interface function]
   10905  *
   10906  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10907  */
   10908 static int
   10909 wm_gmii_mediachange(struct ifnet *ifp)
   10910 {
   10911 	struct wm_softc *sc = ifp->if_softc;
   10912 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10913 	uint32_t reg;
   10914 	int rc;
   10915 
   10916 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10917 		device_xname(sc->sc_dev), __func__));
   10918 	if ((ifp->if_flags & IFF_UP) == 0)
   10919 		return 0;
   10920 
   10921 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10922 	if ((sc->sc_type == WM_T_82580)
   10923 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10924 	    || (sc->sc_type == WM_T_I211)) {
   10925 		reg = CSR_READ(sc, WMREG_PHPM);
   10926 		reg &= ~PHPM_GO_LINK_D;
   10927 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10928 	}
   10929 
   10930 	/* Disable D0 LPLU. */
   10931 	wm_lplu_d0_disable(sc);
   10932 
   10933 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10934 	sc->sc_ctrl |= CTRL_SLU;
   10935 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10936 	    || (sc->sc_type > WM_T_82543)) {
   10937 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10938 	} else {
   10939 		sc->sc_ctrl &= ~CTRL_ASDE;
   10940 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10941 		if (ife->ifm_media & IFM_FDX)
   10942 			sc->sc_ctrl |= CTRL_FD;
   10943 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10944 		case IFM_10_T:
   10945 			sc->sc_ctrl |= CTRL_SPEED_10;
   10946 			break;
   10947 		case IFM_100_TX:
   10948 			sc->sc_ctrl |= CTRL_SPEED_100;
   10949 			break;
   10950 		case IFM_1000_T:
   10951 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10952 			break;
   10953 		case IFM_NONE:
   10954 			/* There is no specific setting for IFM_NONE */
   10955 			break;
   10956 		default:
   10957 			panic("wm_gmii_mediachange: bad media 0x%x",
   10958 			    ife->ifm_media);
   10959 		}
   10960 	}
   10961 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10962 	CSR_WRITE_FLUSH(sc);
   10963 
   10964 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10965 		wm_serdes_mediachange(ifp);
   10966 
   10967 	if (sc->sc_type <= WM_T_82543)
   10968 		wm_gmii_reset(sc);
   10969 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10970 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10971 		/* Allow time for the SFP cage to power up the PHY */
   10972 		delay(300 * 1000);
   10973 		wm_gmii_reset(sc);
   10974 	}
   10975 
   10976 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10977 		return 0;
   10978 	return rc;
   10979 }
   10980 
   10981 /*
   10982  * wm_gmii_mediastatus:	[ifmedia interface function]
   10983  *
   10984  *	Get the current interface media status on a 1000BASE-T device.
   10985  */
   10986 static void
   10987 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10988 {
   10989 	struct wm_softc *sc = ifp->if_softc;
   10990 
   10991 	ether_mediastatus(ifp, ifmr);
   10992 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10993 	    | sc->sc_flowflags;
   10994 }
   10995 
   10996 #define	MDI_IO		CTRL_SWDPIN(2)
   10997 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10998 #define	MDI_CLK		CTRL_SWDPIN(3)
   10999 
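/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang the given value out on the MDIO pin, most significant
 *	bit first, toggling the MDC clock once per bit.
 */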
   11000 static void
   11001 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11002 {
   11003 	uint32_t i, v;
   11004 
   11005 	v = CSR_READ(sc, WMREG_CTRL);
   11006 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11007 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11008 
   11009 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11010 		if (data & i)
   11011 			v |= MDI_IO;
   11012 		else
   11013 			v &= ~MDI_IO;
   11014 		CSR_WRITE(sc, WMREG_CTRL, v);
   11015 		CSR_WRITE_FLUSH(sc);
   11016 		delay(10);
   11017 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11018 		CSR_WRITE_FLUSH(sc);
   11019 		delay(10);
   11020 		CSR_WRITE(sc, WMREG_CTRL, v);
   11021 		CSR_WRITE_FLUSH(sc);
   11022 		delay(10);
   11023 	}
   11024 }
   11025 
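/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang a 16-bit value in from the MDIO pin, most significant
 *	bit first.
 */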
   11026 static uint16_t
   11027 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11028 {
   11029 	uint32_t v, i;
   11030 	uint16_t data = 0;
   11031 
   11032 	v = CSR_READ(sc, WMREG_CTRL);
   11033 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11034 	v |= CTRL_SWDPIO(3);
   11035 
   11036 	CSR_WRITE(sc, WMREG_CTRL, v);
   11037 	CSR_WRITE_FLUSH(sc);
   11038 	delay(10);
   11039 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11040 	CSR_WRITE_FLUSH(sc);
   11041 	delay(10);
   11042 	CSR_WRITE(sc, WMREG_CTRL, v);
   11043 	CSR_WRITE_FLUSH(sc);
   11044 	delay(10);
   11045 
   11046 	for (i = 0; i < 16; i++) {
   11047 		data <<= 1;
   11048 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11049 		CSR_WRITE_FLUSH(sc);
   11050 		delay(10);
   11051 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11052 			data |= 1;
   11053 		CSR_WRITE(sc, WMREG_CTRL, v);
   11054 		CSR_WRITE_FLUSH(sc);
   11055 		delay(10);
   11056 	}
   11057 
   11058 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11059 	CSR_WRITE_FLUSH(sc);
   11060 	delay(10);
   11061 	CSR_WRITE(sc, WMREG_CTRL, v);
   11062 	CSR_WRITE_FLUSH(sc);
   11063 	delay(10);
   11064 
   11065 	return data;
   11066 }
   11067 
   11068 #undef MDI_IO
   11069 #undef MDI_DIR
   11070 #undef MDI_CLK
   11071 
   11072 /*
   11073  * wm_gmii_i82543_readreg:	[mii interface function]
   11074  *
   11075  *	Read a PHY register on the GMII (i82543 version).
   11076  */
   11077 static int
   11078 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11079 {
   11080 	struct wm_softc *sc = device_private(dev);
   11081 
   11082 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11083 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11084 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11085 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11086 
   11087 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11088 		device_xname(dev), phy, reg, *val));
   11089 
   11090 	return 0;
   11091 }
   11092 
   11093 /*
   11094  * wm_gmii_i82543_writereg:	[mii interface function]
   11095  *
   11096  *	Write a PHY register on the GMII (i82543 version).
   11097  */
   11098 static int
   11099 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11100 {
   11101 	struct wm_softc *sc = device_private(dev);
   11102 
   11103 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11104 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11105 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11106 	    (MII_COMMAND_START << 30), 32);
   11107 
   11108 	return 0;
   11109 }
   11110 
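         /*
          * Chips from the i82544 on have an MDIC register, so MII accesses
          * are no longer bit-banged: the opcode, PHY address and register
          * number are written to WMREG_MDIC in one go and the hardware runs
          * the serial protocol itself.  The driver polls MDIC_READY in 50us
          * steps for completion and checks MDIC_E for an error indication.
          */
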
   11111 /*
   11112  * wm_gmii_mdic_readreg:	[mii interface function]
   11113  *
   11114  *	Read a PHY register on the GMII.
   11115  */
   11116 static int
   11117 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11118 {
   11119 	struct wm_softc *sc = device_private(dev);
   11120 	uint32_t mdic = 0;
   11121 	int i;
   11122 
   11123 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11124 	    && (reg > MII_ADDRMASK)) {
   11125 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11126 		    __func__, sc->sc_phytype, reg);
   11127 		reg &= MII_ADDRMASK;
   11128 	}
   11129 
   11130 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11131 	    MDIC_REGADD(reg));
   11132 
   11133 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11134 		delay(50);
   11135 		mdic = CSR_READ(sc, WMREG_MDIC);
   11136 		if (mdic & MDIC_READY)
   11137 			break;
   11138 	}
   11139 
   11140 	if ((mdic & MDIC_READY) == 0) {
   11141 		DPRINTF(sc, WM_DEBUG_GMII,
   11142 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11143 			device_xname(dev), phy, reg));
   11144 		return ETIMEDOUT;
   11145 	} else if (mdic & MDIC_E) {
   11146 		/* This is normal if no PHY is present. */
   11147 		DPRINTF(sc, WM_DEBUG_GMII,
   11148 		    ("%s: MDIC read error: phy %d reg %d\n",
         			device_xname(dev), phy, reg));
   11149 		return -1;
   11150 	} else
   11151 		*val = MDIC_DATA(mdic);
   11152 
   11153 	/*
   11154 	 * Allow some time after each MDIC transaction to avoid
   11155 	 * reading duplicate data in the next MDIC transaction.
   11156 	 */
   11157 	if (sc->sc_type == WM_T_PCH2)
   11158 		delay(100);
   11159 
   11160 	return 0;
   11161 }
   11162 
   11163 /*
   11164  * wm_gmii_mdic_writereg:	[mii interface function]
   11165  *
   11166  *	Write a PHY register on the GMII.
   11167  */
   11168 static int
   11169 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11170 {
   11171 	struct wm_softc *sc = device_private(dev);
   11172 	uint32_t mdic = 0;
   11173 	int i;
   11174 
   11175 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11176 	    && (reg > MII_ADDRMASK)) {
   11177 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11178 		    __func__, sc->sc_phytype, reg);
   11179 		reg &= MII_ADDRMASK;
   11180 	}
   11181 
   11182 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11183 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11184 
   11185 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11186 		delay(50);
   11187 		mdic = CSR_READ(sc, WMREG_MDIC);
   11188 		if (mdic & MDIC_READY)
   11189 			break;
   11190 	}
   11191 
   11192 	if ((mdic & MDIC_READY) == 0) {
   11193 		DPRINTF(sc, WM_DEBUG_GMII,
   11194 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11195 			device_xname(dev), phy, reg));
   11196 		return ETIMEDOUT;
   11197 	} else if (mdic & MDIC_E) {
   11198 		DPRINTF(sc, WM_DEBUG_GMII,
   11199 		    ("%s: MDIC write error: phy %d reg %d\n",
   11200 			device_xname(dev), phy, reg));
   11201 		return -1;
   11202 	}
   11203 
   11204 	/*
   11205 	 * Allow some time after each MDIC transaction to avoid
   11206 	 * reading duplicate data in the next MDIC transaction.
   11207 	 */
   11208 	if (sc->sc_type == WM_T_PCH2)
   11209 		delay(100);
   11210 
   11211 	return 0;
   11212 }
   11213 
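         /*
          * Register numbers above MII_ADDRMASK (0x1f) carry a page number
          * in their upper bits.  For the IGP PHYs the page is latched by
          * writing it to IGPHY_PAGE_SELECT first; the MDIC access itself
          * then uses only the low five bits of the register number.
          */
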
   11214 /*
   11215  * wm_gmii_i82544_readreg:	[mii interface function]
   11216  *
   11217  *	Read a PHY register on the GMII.
   11218  */
   11219 static int
   11220 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11221 {
   11222 	struct wm_softc *sc = device_private(dev);
   11223 	int rv;
   11224 
   11225 	if (sc->phy.acquire(sc)) {
   11226 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11227 		return -1;
   11228 	}
   11229 
   11230 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11231 
   11232 	sc->phy.release(sc);
   11233 
   11234 	return rv;
   11235 }
   11236 
   11237 static int
   11238 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11239 {
   11240 	struct wm_softc *sc = device_private(dev);
   11241 	int rv;
   11242 
   11243 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11244 		switch (sc->sc_phytype) {
   11245 		case WMPHY_IGP:
   11246 		case WMPHY_IGP_2:
   11247 		case WMPHY_IGP_3:
   11248 			rv = wm_gmii_mdic_writereg(dev, phy,
   11249 			    IGPHY_PAGE_SELECT, reg);
   11250 			if (rv != 0)
   11251 				return rv;
   11252 			break;
   11253 		default:
   11254 #ifdef WM_DEBUG
   11255 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11256 			    __func__, sc->sc_phytype, reg);
   11257 #endif
   11258 			break;
   11259 		}
   11260 	}
   11261 
   11262 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11263 }
   11264 
   11265 /*
   11266  * wm_gmii_i82544_writereg:	[mii interface function]
   11267  *
   11268  *	Write a PHY register on the GMII.
   11269  */
   11270 static int
   11271 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11272 {
   11273 	struct wm_softc *sc = device_private(dev);
   11274 	int rv;
   11275 
   11276 	if (sc->phy.acquire(sc)) {
   11277 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11278 		return -1;
   11279 	}
   11280 
   11281 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg, val);
   11282 	sc->phy.release(sc);
   11283 
   11284 	return rv;
   11285 }
   11286 
   11287 static int
   11288 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11289 {
   11290 	struct wm_softc *sc = device_private(dev);
   11291 	int rv;
   11292 
   11293 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11294 		switch (sc->sc_phytype) {
   11295 		case WMPHY_IGP:
   11296 		case WMPHY_IGP_2:
   11297 		case WMPHY_IGP_3:
   11298 			rv = wm_gmii_mdic_writereg(dev, phy,
   11299 			    IGPHY_PAGE_SELECT, reg);
   11300 			if (rv != 0)
   11301 				return rv;
   11302 			break;
   11303 		default:
   11304 #ifdef WM_DEBUG
   11305 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11306 			    __func__, sc->sc_phytype, reg);
   11307 #endif
   11308 			break;
   11309 		}
   11310 	}
   11311 
   11312 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11313 }
   11314 
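         /*
          * The GG82563 PHY on the 80003's Kumeran bus uses two page-select
          * registers: the regular one for registers below
          * GG82563_MIN_ALT_REG and an alternative one for registers 30 and
          * 31.  When WM_F_80003_MDIC_WA is set, the page select is read
          * back and verified, with extra delays, to work around a bug with
          * the MDIC ready bit.
          */
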
   11315 /*
   11316  * wm_gmii_i80003_readreg:	[mii interface function]
   11317  *
   11318  *	Read a PHY register on the Kumeran bus (80003's GG82563).
   11319  * This could be handled by the PHY layer if we didn't have to lock the
   11320  * resource ...
   11321  */
   11322 static int
   11323 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11324 {
   11325 	struct wm_softc *sc = device_private(dev);
   11326 	int page_select;
   11327 	uint16_t temp, temp2;
   11328 	int rv = 0;
   11329 
   11330 	if (phy != 1) /* Only one PHY on kumeran bus */
   11331 		return -1;
   11332 
   11333 	if (sc->phy.acquire(sc)) {
   11334 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11335 		return -1;
   11336 	}
   11337 
   11338 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11339 		page_select = GG82563_PHY_PAGE_SELECT;
   11340 	else {
   11341 		/*
   11342 		 * Use Alternative Page Select register to access registers
   11343 		 * 30 and 31.
   11344 		 */
   11345 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11346 	}
   11347 	temp = reg >> GG82563_PAGE_SHIFT;
   11348 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11349 		goto out;
   11350 
   11351 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11352 		/*
   11353 		 * Wait another 200us to work around a bug with the ready
   11354 		 * bit in the MDIC register.
   11355 		 */
   11356 		delay(200);
   11357 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11358 		if ((rv != 0) || (temp2 != temp)) {
   11359 			device_printf(dev, "%s failed\n", __func__);
   11360 			rv = -1;
   11361 			goto out;
   11362 		}
   11363 		delay(200);
   11364 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11365 		delay(200);
   11366 	} else
   11367 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11368 
   11369 out:
   11370 	sc->phy.release(sc);
   11371 	return rv;
   11372 }
   11373 
   11374 /*
   11375  * wm_gmii_i80003_writereg:	[mii interface function]
   11376  *
   11377  *	Write a PHY register on the Kumeran bus (80003's GG82563).
   11378  * This could be handled by the PHY layer if we didn't have to lock the
   11379  * resource ...
   11380  */
   11381 static int
   11382 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11383 {
   11384 	struct wm_softc *sc = device_private(dev);
   11385 	int page_select, rv;
   11386 	uint16_t temp, temp2;
   11387 
   11388 	if (phy != 1) /* Only one PHY on kumeran bus */
   11389 		return -1;
   11390 
   11391 	if (sc->phy.acquire(sc)) {
   11392 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11393 		return -1;
   11394 	}
   11395 
   11396 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11397 		page_select = GG82563_PHY_PAGE_SELECT;
   11398 	else {
   11399 		/*
   11400 		 * Use Alternative Page Select register to access registers
   11401 		 * 30 and 31.
   11402 		 */
   11403 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11404 	}
   11405 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11406 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11407 		goto out;
   11408 
   11409 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11410 		/*
   11411 		 * Wait another 200us to work around a bug with the ready
   11412 		 * bit in the MDIC register.
   11413 		 */
   11414 		delay(200);
   11415 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11416 		if ((rv != 0) || (temp2 != temp)) {
   11417 			device_printf(dev, "%s failed\n", __func__);
   11418 			rv = -1;
   11419 			goto out;
   11420 		}
   11421 		delay(200);
   11422 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11423 		delay(200);
   11424 	} else
   11425 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11426 
   11427 out:
   11428 	sc->phy.release(sc);
   11429 	return rv;
   11430 }
   11431 
   11432 /*
   11433  * wm_gmii_bm_readreg:	[mii interface function]
   11434  *
   11435  *	Read a PHY register on the BM PHY.
   11436  * This could be handled by the PHY layer if we didn't have to lock the
   11437  * resource ...
   11438  */
   11439 static int
   11440 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11441 {
   11442 	struct wm_softc *sc = device_private(dev);
   11443 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11444 	int rv;
   11445 
   11446 	if (sc->phy.acquire(sc)) {
   11447 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11448 		return -1;
   11449 	}
   11450 
   11451 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11452 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11453 		    || (reg == 31)) ? 1 : phy;
   11454 	/* Page 800 works differently than the rest so it has its own func */
   11455 	if (page == BM_WUC_PAGE) {
   11456 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11457 		goto release;
   11458 	}
   11459 
   11460 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11461 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11462 		    && (sc->sc_type != WM_T_82583))
   11463 			rv = wm_gmii_mdic_writereg(dev, phy,
   11464 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11465 		else
   11466 			rv = wm_gmii_mdic_writereg(dev, phy,
   11467 			    BME1000_PHY_PAGE_SELECT, page);
   11468 		if (rv != 0)
   11469 			goto release;
   11470 	}
   11471 
   11472 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11473 
   11474 release:
   11475 	sc->phy.release(sc);
   11476 	return rv;
   11477 }
   11478 
   11479 /*
   11480  * wm_gmii_bm_writereg:	[mii interface function]
   11481  *
   11482  *	Write a PHY register on the BM PHY.
   11483  * This could be handled by the PHY layer if we didn't have to lock the
   11484  * resource ...
   11485  */
   11486 static int
   11487 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11488 {
   11489 	struct wm_softc *sc = device_private(dev);
   11490 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11491 	int rv;
   11492 
   11493 	if (sc->phy.acquire(sc)) {
   11494 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11495 		return -1;
   11496 	}
   11497 
   11498 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11499 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11500 		    || (reg == 31)) ? 1 : phy;
   11501 	/* Page 800 works differently than the rest so it has its own func */
   11502 	if (page == BM_WUC_PAGE) {
   11503 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11504 		goto release;
   11505 	}
   11506 
   11507 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11508 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11509 		    && (sc->sc_type != WM_T_82583))
   11510 			rv = wm_gmii_mdic_writereg(dev, phy,
   11511 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11512 		else
   11513 			rv = wm_gmii_mdic_writereg(dev, phy,
   11514 			    BME1000_PHY_PAGE_SELECT, page);
   11515 		if (rv != 0)
   11516 			goto release;
   11517 	}
   11518 
   11519 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11520 
   11521 release:
   11522 	sc->phy.release(sc);
   11523 	return rv;
   11524 }
   11525 
   11526 /*
   11527  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11528  *  @dev: pointer to the device
   11529  *  @phy_regp: pointer to store the original contents of BM_WUC_ENABLE_REG
   11530  *
   11531  *  Assumes the semaphore is already acquired and that phy_regp points to a
   11532  *  valid memory address in which to store the contents of BM_WUC_ENABLE_REG.
   11533  */
   11534 static int
   11535 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11536 {
   11537 #ifdef WM_DEBUG
   11538 	struct wm_softc *sc = device_private(dev);
   11539 #endif
   11540 	uint16_t temp;
   11541 	int rv;
   11542 
   11543 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11544 		device_xname(dev), __func__));
   11545 
   11546 	if (!phy_regp)
   11547 		return -1;
   11548 
   11549 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11550 
   11551 	/* Select Port Control Registers page */
   11552 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11553 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11554 	if (rv != 0)
   11555 		return rv;
   11556 
   11557 	/* Read WUCE and save it */
   11558 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11559 	if (rv != 0)
   11560 		return rv;
   11561 
   11562 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11563 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11564 	 */
   11565 	temp = *phy_regp;
   11566 	temp |= BM_WUC_ENABLE_BIT;
   11567 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11568 
   11569 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11570 		return rv;
   11571 
   11572 	/* Select Host Wakeup Registers page - caller now able to write
   11573 	 * registers on the Wakeup registers page
   11574 	 */
   11575 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11576 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11577 }
   11578 
   11579 /*
   11580  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11581  *  @dev: pointer to the device
   11582  *  @phy_regp: pointer to the original contents of BM_WUC_ENABLE_REG
   11583  *
   11584  *  Restore BM_WUC_ENABLE_REG to its original value.
   11585  *
   11586  *  Assumes the semaphore is already acquired and that *phy_regp holds the
   11587  *  contents of BM_WUC_ENABLE_REG from before the register(s) on BM_WUC_PAGE
   11588  *  were accessed by the caller.
   11589  */
   11590 static int
   11591 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11592 {
   11593 #ifdef WM_DEBUG
   11594 	struct wm_softc *sc = device_private(dev);
   11595 #endif
   11596 
   11597 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11598 		device_xname(dev), __func__));
   11599 
   11600 	if (!phy_regp)
   11601 		return -1;
   11602 
   11603 	/* Select Port Control Registers page */
   11604 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11605 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11606 
   11607 	/* Restore 769.17 to its original value */
   11608 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11609 
   11610 	return 0;
   11611 }
   11612 
   11613 /*
   11614  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   11615  *  @dev: pointer to the device
   11616  *  @offset: register offset to be read or written
   11617  *  @val: pointer to the data to read or write
   11618  *  @rd: determines if operation is read or write
   11619  *  @page_set: BM_WUC_PAGE already set and access enabled
   11620  *
   11621  *  Read the PHY register at offset and store the retrieved information in
   11622  *  data, or write data to PHY register at offset.  Note the procedure to
   11623  *  access the PHY wakeup registers is different than reading the other PHY
   11624  *  registers. It works as such:
   11625  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   11626  *  2) Set page to 800 for the host (801 for the manageability firmware)
   11627  *  3) Write the address using the address opcode (0x11)
   11628  *  4) Read or write the data using the data opcode (0x12)
   11629  *  5) Restore 769.17.2 to its original value
   11630  *
   11631  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11632  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11633  *
   11634  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11635  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11636  *  is responsible for calls to
   11637  *  wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11637  */
   11638 static int
   11639 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, bool rd,
   11640 	bool page_set)
   11641 {
   11642 	struct wm_softc *sc = device_private(dev);
   11643 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11644 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11645 	uint16_t wuce;
   11646 	int rv = 0;
   11647 
   11648 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11649 		device_xname(dev), __func__));
   11650 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11651 	if ((sc->sc_type == WM_T_PCH)
   11652 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11653 		device_printf(dev,
   11654 		    "Attempting to access page %d while gig enabled.\n", page);
   11655 	}
   11656 
   11657 	if (!page_set) {
   11658 		/* Enable access to PHY wakeup registers */
   11659 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11660 		if (rv != 0) {
   11661 			device_printf(dev,
   11662 			    "%s: Could not enable PHY wakeup reg access\n",
   11663 			    __func__);
   11664 			return rv;
   11665 		}
   11666 	}
   11667 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11668 		device_xname(sc->sc_dev), __func__, page, regnum));
   11669 
   11670 	/*
   11671 	 * 3) and 4) Access the PHY wakeup registers: see the sequence
   11672 	 * described in the function comment above.
   11673 	 */
   11674 
   11675 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11676 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11677 	if (rv != 0)
   11678 		return rv;
   11679 
   11680 	if (rd) {
   11681 		/* Read the Wakeup register page value using opcode 0x12 */
   11682 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11683 	} else {
   11684 		/* Write the Wakeup register page value using opcode 0x12 */
   11685 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11686 	}
   11687 	if (rv != 0)
   11688 		return rv;
   11689 
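         	/* 5) Restore 769.17 unless the caller manages the page */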
   11690 	if (!page_set)
   11691 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11692 
   11693 	return rv;
   11694 }
   11695 
   11696 /*
   11697  * wm_gmii_hv_readreg:	[mii interface function]
   11698  *
   11699  *	Read a PHY register on the HV (PCH) PHY.
   11700  * This could be handled by the PHY layer if we didn't have to lock the
   11701  * resource ...
   11702  */
   11703 static int
   11704 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11705 {
   11706 	struct wm_softc *sc = device_private(dev);
   11707 	int rv;
   11708 
   11709 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11710 		device_xname(dev), __func__));
   11711 	if (sc->phy.acquire(sc)) {
   11712 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11713 		return -1;
   11714 	}
   11715 
   11716 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11717 	sc->phy.release(sc);
   11718 	return rv;
   11719 }
   11720 
   11721 static int
   11722 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11723 {
   11724 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11725 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11726 	int rv;
   11727 
   11728 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11729 
   11730 	/* Page 800 works differently than the rest so it has its own func */
   11731 	if (page == BM_WUC_PAGE)
   11732 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11733 
   11734 	/*
   11735 	 * Lower than page 768 works differently than the rest so it has its
   11736 	 * own func
   11737 	 */
   11738 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11739 		device_printf(dev, "%s: unexpected page %d\n", __func__, page);
   11740 		return -1;
   11741 	}
   11742 
   11743 	/*
   11744 	 * XXX I21[789] documents say that the SMBus Address register is at
   11745 	 * PHY address 01, Page 0 (not 768), Register 26.
   11746 	 */
   11747 	if (page == HV_INTC_FC_PAGE_START)
   11748 		page = 0;
   11749 
   11750 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11751 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11752 		    page << BME1000_PAGE_SHIFT);
   11753 		if (rv != 0)
   11754 			return rv;
   11755 	}
   11756 
   11757 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11758 }
   11759 
   11760 /*
   11761  * wm_gmii_hv_writereg:	[mii interface function]
   11762  *
   11763  *	Write a PHY register on the HV (PCH) PHY.
   11764  * This could be handled by the PHY layer if we didn't have to lock the
   11765  * resource ...
   11766  */
   11767 static int
   11768 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11769 {
   11770 	struct wm_softc *sc = device_private(dev);
   11771 	int rv;
   11772 
   11773 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11774 		device_xname(dev), __func__));
   11775 
   11776 	if (sc->phy.acquire(sc)) {
   11777 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11778 		return -1;
   11779 	}
   11780 
   11781 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11782 	sc->phy.release(sc);
   11783 
   11784 	return rv;
   11785 }
   11786 
   11787 static int
   11788 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11789 {
   11790 	struct wm_softc *sc = device_private(dev);
   11791 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11792 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11793 	int rv;
   11794 
   11795 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11796 
   11797 	/* Page 800 works differently than the rest so it has its own func */
   11798 	if (page == BM_WUC_PAGE)
   11799 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11800 		    false);
   11801 
   11802 	/*
   11803 	 * Lower than page 768 works differently than the rest so it has its
   11804 	 * own func
   11805 	 */
   11806 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11807 		device_printf(dev, "%s: unexpected page %d\n", __func__, page);
   11808 		return -1;
   11809 	}
   11810 
   11812 	/*
   11813 	 * XXX I21[789] documents say that the SMBus Address register
   11814 	 * is at PHY address 01, Page 0 (not 768), Register 26.
   11815 	 */
   11816 	if (page == HV_INTC_FC_PAGE_START)
   11817 		page = 0;
   11818 
   11819 	/*
   11820 	 * XXX Work around MDIO accesses being disabled after entering
   11821 	 * IEEE Power Down (whenever bit 11 of the PHY control
   11822 	 * register is set).
   11823 	 */
   11824 	if (sc->sc_phytype == WMPHY_82578) {
   11825 		struct mii_softc *child;
   11826 
   11827 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11828 		if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11829 		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11830 		    && ((val & (1 << 11)) != 0)) {
   11831 			device_printf(dev, "XXX need workaround\n");
   11832 		}
   11833 	}
   11834 
   11835 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11836 		rv = wm_gmii_mdic_writereg(dev, 1,
   11837 		    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11838 		if (rv != 0)
   11839 			return rv;
   11840 	}
   11842 
   11843 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11844 }
   11845 
   11846 /*
   11847  * wm_gmii_82580_readreg:	[mii interface function]
   11848  *
   11849  *	Read a PHY register on the 82580 and I350.
   11850  * This could be handled by the PHY layer if we didn't have to lock the
   11851  * resource ...
   11852  */
   11853 static int
   11854 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11855 {
   11856 	struct wm_softc *sc = device_private(dev);
   11857 	int rv;
   11858 
   11859 	if (sc->phy.acquire(sc) != 0) {
   11860 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11861 		return -1;
   11862 	}
   11863 
   11864 #ifdef DIAGNOSTIC
   11865 	if (reg > MII_ADDRMASK) {
   11866 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11867 		    __func__, sc->sc_phytype, reg);
   11868 		reg &= MII_ADDRMASK;
   11869 	}
   11870 #endif
   11871 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11872 
   11873 	sc->phy.release(sc);
   11874 	return rv;
   11875 }
   11876 
   11877 /*
   11878  * wm_gmii_82580_writereg:	[mii interface function]
   11879  *
   11880  *	Write a PHY register on the 82580 and I350.
   11881  * This could be handled by the PHY layer if we didn't have to lock the
   11882  * resource ...
   11883  */
   11884 static int
   11885 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11886 {
   11887 	struct wm_softc *sc = device_private(dev);
   11888 	int rv;
   11889 
   11890 	if (sc->phy.acquire(sc) != 0) {
   11891 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11892 		return -1;
   11893 	}
   11894 
   11895 #ifdef DIAGNOSTIC
   11896 	if (reg > MII_ADDRMASK) {
   11897 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11898 		    __func__, sc->sc_phytype, reg);
   11899 		reg &= MII_ADDRMASK;
   11900 	}
   11901 #endif
   11902 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11903 
   11904 	sc->phy.release(sc);
   11905 	return rv;
   11906 }
   11907 
   11908 /*
   11909  * wm_gmii_gs40g_readreg:	[mii interface function]
   11910  *
   11911  *	Read a PHY register on the I210 and I211.
   11912  * This could be handled by the PHY layer if we didn't have to lock the
   11913  * resource ...
   11914  */
   11915 static int
   11916 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11917 {
   11918 	struct wm_softc *sc = device_private(dev);
   11919 	int page, offset;
   11920 	int rv;
   11921 
   11922 	/* Acquire semaphore */
   11923 	if (sc->phy.acquire(sc)) {
   11924 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11925 		return -1;
   11926 	}
   11927 
   11928 	/* Page select */
   11929 	page = reg >> GS40G_PAGE_SHIFT;
   11930 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11931 	if (rv != 0)
   11932 		goto release;
   11933 
   11934 	/* Read reg */
   11935 	offset = reg & GS40G_OFFSET_MASK;
   11936 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11937 
   11938 release:
   11939 	sc->phy.release(sc);
   11940 	return rv;
   11941 }
   11942 
   11943 /*
   11944  * wm_gmii_gs40g_writereg:	[mii interface function]
   11945  *
   11946  *	Write a PHY register on the I210 and I211.
   11947  * This could be handled by the PHY layer if we didn't have to lock the
   11948  * resource ...
   11949  */
   11950 static int
   11951 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11952 {
   11953 	struct wm_softc *sc = device_private(dev);
   11954 	uint16_t page;
   11955 	int offset, rv;
   11956 
   11957 	/* Acquire semaphore */
   11958 	if (sc->phy.acquire(sc)) {
   11959 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11960 		return -1;
   11961 	}
   11962 
   11963 	/* Page select */
   11964 	page = reg >> GS40G_PAGE_SHIFT;
   11965 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11966 	if (rv != 0)
   11967 		goto release;
   11968 
   11969 	/* Write reg */
   11970 	offset = reg & GS40G_OFFSET_MASK;
   11971 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11972 
   11973 release:
   11974 	/* Release semaphore */
   11975 	sc->phy.release(sc);
   11976 	return rv;
   11977 }
   11978 
   11979 /*
   11980  * wm_gmii_statchg:	[mii interface function]
   11981  *
   11982  *	Callback from MII layer when media changes.
   11983  */
   11984 static void
   11985 wm_gmii_statchg(struct ifnet *ifp)
   11986 {
   11987 	struct wm_softc *sc = ifp->if_softc;
   11988 	struct mii_data *mii = &sc->sc_mii;
   11989 
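         	/*
         	 * Clear the previously negotiated flow control (CTRL_TFCE/
         	 * CTRL_RFCE, FCRTL_XONE) and collision distance settings;
         	 * they are recomputed below from the new media status.
         	 */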
   11990 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11991 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11992 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11993 
   11994 	/* Get flow control negotiation result. */
   11995 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11996 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11997 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11998 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11999 	}
   12000 
   12001 	if (sc->sc_flowflags & IFM_FLOW) {
   12002 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12003 			sc->sc_ctrl |= CTRL_TFCE;
   12004 			sc->sc_fcrtl |= FCRTL_XONE;
   12005 		}
   12006 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12007 			sc->sc_ctrl |= CTRL_RFCE;
   12008 	}
   12009 
   12010 	if (mii->mii_media_active & IFM_FDX) {
   12011 		DPRINTF(sc, WM_DEBUG_LINK,
   12012 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12013 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12014 	} else {
   12015 		DPRINTF(sc, WM_DEBUG_LINK,
   12016 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12017 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12018 	}
   12019 
   12020 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12021 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12022 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   12023 						 : WMREG_FCRTL, sc->sc_fcrtl);
   12024 	if (sc->sc_type == WM_T_80003) {
   12025 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12026 		case IFM_1000_T:
   12027 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12028 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   12029 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   12030 			break;
   12031 		default:
   12032 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12033 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   12034 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   12035 			break;
   12036 		}
   12037 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12038 	}
   12039 }
   12040 
   12041 /* kumeran related (80003, ICH* and PCH*) */
   12042 
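         /*
          * Kumeran registers are reached through WMREG_KUMCTRLSTA: the
          * register offset goes into the KUMCTRLSTA_OFFSET field and, for a
          * read, KUMCTRLSTA_REN is set; after a short delay the data appears
          * in the low 16 bits.  A write carries the data in the low 16 bits
          * of the same access.  On the 80003 these accesses are protected by
          * the SWFW_MAC_CSR_SM semaphore; other chips use the normal PHY
          * semaphore.
          */
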
   12043 /*
   12044  * wm_kmrn_readreg:
   12045  *
   12046  *	Read a kumeran register
   12047  */
   12048 static int
   12049 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12050 {
   12051 	int rv;
   12052 
   12053 	if (sc->sc_type == WM_T_80003)
   12054 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12055 	else
   12056 		rv = sc->phy.acquire(sc);
   12057 	if (rv != 0) {
   12058 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12059 		    __func__);
   12060 		return rv;
   12061 	}
   12062 
   12063 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12064 
   12065 	if (sc->sc_type == WM_T_80003)
   12066 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12067 	else
   12068 		sc->phy.release(sc);
   12069 
   12070 	return rv;
   12071 }
   12072 
   12073 static int
   12074 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12075 {
   12076 
   12077 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12078 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12079 	    KUMCTRLSTA_REN);
   12080 	CSR_WRITE_FLUSH(sc);
   12081 	delay(2);
   12082 
   12083 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12084 
   12085 	return 0;
   12086 }
   12087 
   12088 /*
   12089  * wm_kmrn_writereg:
   12090  *
   12091  *	Write a kumeran register
   12092  */
   12093 static int
   12094 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12095 {
   12096 	int rv;
   12097 
   12098 	if (sc->sc_type == WM_T_80003)
   12099 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12100 	else
   12101 		rv = sc->phy.acquire(sc);
   12102 	if (rv != 0) {
   12103 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12104 		    __func__);
   12105 		return rv;
   12106 	}
   12107 
   12108 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12109 
   12110 	if (sc->sc_type == WM_T_80003)
   12111 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12112 	else
   12113 		sc->phy.release(sc);
   12114 
   12115 	return rv;
   12116 }
   12117 
   12118 static int
   12119 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12120 {
   12121 
   12122 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12123 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12124 
   12125 	return 0;
   12126 }
   12127 
   12128 /*
   12129  * EMI register related (82579, WMPHY_I217 (PCH2 and newer)).
   12130  * This access method is different from IEEE MMD.
   12131  */
   12132 static int
   12133 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12134 {
   12135 	struct wm_softc *sc = device_private(dev);
   12136 	int rv;
   12137 
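         	/* Indirect access: latch the address first, then move the data */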
   12138 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12139 	if (rv != 0)
   12140 		return rv;
   12141 
   12142 	if (rd)
   12143 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12144 	else
   12145 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12146 	return rv;
   12147 }
   12148 
   12149 static int
   12150 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12151 {
   12152 
   12153 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12154 }
   12155 
   12156 static int
   12157 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12158 {
   12159 
   12160 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12161 }
   12162 
   12163 /* SGMII related */
   12164 
   12165 /*
   12166  * wm_sgmii_uses_mdio
   12167  *
   12168  * Check whether the transaction is to the internal PHY or the external
   12169  * MDIO interface. Return true if it's MDIO.
   12170  */
   12171 static bool
   12172 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12173 {
   12174 	uint32_t reg;
   12175 	bool ismdio = false;
   12176 
   12177 	switch (sc->sc_type) {
   12178 	case WM_T_82575:
   12179 	case WM_T_82576:
   12180 		reg = CSR_READ(sc, WMREG_MDIC);
   12181 		ismdio = ((reg & MDIC_DEST) != 0);
   12182 		break;
   12183 	case WM_T_82580:
   12184 	case WM_T_I350:
   12185 	case WM_T_I354:
   12186 	case WM_T_I210:
   12187 	case WM_T_I211:
   12188 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12189 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12190 		break;
   12191 	default:
   12192 		break;
   12193 	}
   12194 
   12195 	return ismdio;
   12196 }
   12197 
   12198 /* Setup internal SGMII PHY for SFP */
   12199 static void
   12200 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12201 {
   12202 	uint16_t id1, id2, phyreg;
   12203 	int i, rv;
   12204 
   12205 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12206 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12207 		return;
   12208 
   12209 	for (i = 0; i < MII_NPHY; i++) {
   12210 		sc->phy.no_errprint = true;
   12211 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12212 		if (rv != 0)
   12213 			continue;
   12214 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12215 		if (rv != 0)
   12216 			continue;
   12217 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12218 			continue;
   12219 		sc->phy.no_errprint = false;
   12220 
   12221 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12222 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12223 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12224 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12225 		break;
   12226 	}
   12228 }
   12229 
   12230 /*
   12231  * wm_sgmii_readreg:	[mii interface function]
   12232  *
   12233  *	Read a PHY register on the SGMII
   12234  * This could be handled by the PHY layer if we didn't have to lock the
   12235  * resource ...
   12236  */
   12237 static int
   12238 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12239 {
   12240 	struct wm_softc *sc = device_private(dev);
   12241 	int rv;
   12242 
   12243 	if (sc->phy.acquire(sc)) {
   12244 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12245 		return -1;
   12246 	}
   12247 
   12248 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12249 
   12250 	sc->phy.release(sc);
   12251 	return rv;
   12252 }
   12253 
   12254 static int
   12255 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12256 {
   12257 	struct wm_softc *sc = device_private(dev);
   12258 	uint32_t i2ccmd;
   12259 	int i, rv = 0;
   12260 
   12261 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12262 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12263 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12264 
   12265 	/* Poll the ready bit */
   12266 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12267 		delay(50);
   12268 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12269 		if (i2ccmd & I2CCMD_READY)
   12270 			break;
   12271 	}
   12272 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12273 		device_printf(dev, "I2CCMD Read did not complete\n");
   12274 		rv = ETIMEDOUT;
   12275 	}
   12276 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12277 		if (!sc->phy.no_errprint)
   12278 			device_printf(dev, "I2CCMD Error bit set\n");
   12279 		rv = EIO;
   12280 	}
   12281 
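         	/*
         	 * The I2C interface returns the two data bytes in the opposite
         	 * order from MII, so swap them before handing the value back.
         	 */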
   12282 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12283 
   12284 	return rv;
   12285 }
   12286 
   12287 /*
   12288  * wm_sgmii_writereg:	[mii interface function]
   12289  *
   12290  *	Write a PHY register on the SGMII.
   12291  * This could be handled by the PHY layer if we didn't have to lock the
   12292  * resource ...
   12293  */
   12294 static int
   12295 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12296 {
   12297 	struct wm_softc *sc = device_private(dev);
   12298 	int rv;
   12299 
   12300 	if (sc->phy.acquire(sc) != 0) {
   12301 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12302 		return -1;
   12303 	}
   12304 
   12305 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12306 
   12307 	sc->phy.release(sc);
   12308 
   12309 	return rv;
   12310 }
   12311 
   12312 static int
   12313 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12314 {
   12315 	struct wm_softc *sc = device_private(dev);
   12316 	uint32_t i2ccmd;
   12317 	uint16_t swapdata;
   12318 	int rv = 0;
   12319 	int i;
   12320 
   12321 	/* Swap the data bytes for the I2C interface */
   12322 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12323 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12324 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12325 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12326 
   12327 	/* Poll the ready bit */
   12328 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12329 		delay(50);
   12330 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12331 		if (i2ccmd & I2CCMD_READY)
   12332 			break;
   12333 	}
   12334 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12335 		device_printf(dev, "I2CCMD Write did not complete\n");
   12336 		rv = ETIMEDOUT;
   12337 	}
   12338 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12339 		device_printf(dev, "I2CCMD Error bit set\n");
   12340 		rv = EIO;
   12341 	}
   12342 
   12343 	return rv;
   12344 }
   12345 
   12346 /* TBI related */
   12347 
   12348 static bool
   12349 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12350 {
   12351 	bool sig;
   12352 
   12353 	sig = ctrl & CTRL_SWDPIN(1);
   12354 
   12355 	/*
   12356 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12357 	 * detect a signal, 1 if they don't.
   12358 	 */
   12359 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12360 		sig = !sig;
   12361 
   12362 	return sig;
   12363 }
   12364 
   12365 /*
   12366  * wm_tbi_mediainit:
   12367  *
   12368  *	Initialize media for use on 1000BASE-X devices.
   12369  */
   12370 static void
   12371 wm_tbi_mediainit(struct wm_softc *sc)
   12372 {
   12373 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12374 	const char *sep = "";
   12375 
   12376 	if (sc->sc_type < WM_T_82543)
   12377 		sc->sc_tipg = TIPG_WM_DFLT;
   12378 	else
   12379 		sc->sc_tipg = TIPG_LG_DFLT;
   12380 
   12381 	sc->sc_tbi_serdes_anegticks = 5;
   12382 
   12383 	/* Initialize our media structures */
   12384 	sc->sc_mii.mii_ifp = ifp;
   12385 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12386 
   12387 	ifp->if_baudrate = IF_Gbps(1);
   12388 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12389 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12390 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12391 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12392 		    sc->sc_core_lock);
   12393 	} else {
   12394 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12395 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12396 	}
   12397 
   12398 	/*
   12399 	 * SWD Pins:
   12400 	 *
   12401 	 *	0 = Link LED (output)
   12402 	 *	1 = Loss Of Signal (input)
   12403 	 */
   12404 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12405 
   12406 	/* XXX Perhaps this is only for TBI */
   12407 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12408 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12409 
   12410 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12411 		sc->sc_ctrl &= ~CTRL_LRST;
   12412 
   12413 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12414 
   12415 #define	ADD(ss, mm, dd)							\
   12416 do {									\
   12417 	aprint_normal("%s%s", sep, ss);					\
   12418 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12419 	sep = ", ";							\
   12420 } while (/*CONSTCOND*/0)
   12421 
   12422 	aprint_normal_dev(sc->sc_dev, "");
   12423 
   12424 	if (sc->sc_type == WM_T_I354) {
   12425 		uint32_t status;
   12426 
   12427 		status = CSR_READ(sc, WMREG_STATUS);
   12428 		if (((status & STATUS_2P5_SKU) != 0)
   12429 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12430 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   12431 		} else
   12432 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   12433 	} else if (sc->sc_type == WM_T_82545) {
   12434 		/* Only 82545 is LX (XXX except SFP) */
   12435 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12436 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12437 	} else if (sc->sc_sfptype != 0) {
   12438 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12439 		switch (sc->sc_sfptype) {
   12440 		default:
   12441 		case SFF_SFP_ETH_FLAGS_1000SX:
   12442 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12443 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12444 			break;
   12445 		case SFF_SFP_ETH_FLAGS_1000LX:
   12446 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12447 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12448 			break;
   12449 		case SFF_SFP_ETH_FLAGS_1000CX:
   12450 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12451 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12452 			break;
   12453 		case SFF_SFP_ETH_FLAGS_1000T:
   12454 			ADD("1000baseT", IFM_1000_T, 0);
   12455 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12456 			break;
   12457 		case SFF_SFP_ETH_FLAGS_100FX:
   12458 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12459 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12460 			break;
   12461 		}
   12462 	} else {
   12463 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12464 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12465 	}
   12466 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12467 	aprint_normal("\n");
   12468 
   12469 #undef ADD
   12470 
   12471 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12472 }
   12473 
   12474 /*
   12475  * wm_tbi_mediachange:	[ifmedia interface function]
   12476  *
   12477  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12478  */
   12479 static int
   12480 wm_tbi_mediachange(struct ifnet *ifp)
   12481 {
   12482 	struct wm_softc *sc = ifp->if_softc;
   12483 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12484 	uint32_t status, ctrl;
   12485 	bool signal;
   12486 	int i;
   12487 
   12488 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12489 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12490 		/* XXX need some work for >= 82571 and < 82575 */
   12491 		if (sc->sc_type < WM_T_82575)
   12492 			return 0;
   12493 	}
   12494 
   12495 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12496 	    || (sc->sc_type >= WM_T_82575))
   12497 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12498 
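         	/*
         	 * Clear the link-reset bit and build the transmit config word
         	 * (TXCW), which advertises our duplex abilities; TXCW_ANE
         	 * enables 802.3z autonegotiation.  Pause bits are added below
         	 * when flow control is requested.
         	 */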
   12499 	sc->sc_ctrl &= ~CTRL_LRST;
   12500 	sc->sc_txcw = TXCW_ANE;
   12501 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12502 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12503 	else if (ife->ifm_media & IFM_FDX)
   12504 		sc->sc_txcw |= TXCW_FD;
   12505 	else
   12506 		sc->sc_txcw |= TXCW_HD;
   12507 
   12508 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12509 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12510 
   12511 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   12512 		device_xname(sc->sc_dev), sc->sc_txcw));
   12513 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12514 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12515 	CSR_WRITE_FLUSH(sc);
   12516 	delay(1000);
   12517 
   12518 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12519 	signal = wm_tbi_havesignal(sc, ctrl);
   12520 
   12521 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12522 		signal));
   12523 
   12524 	if (signal) {
   12525 		/* Have signal; wait for the link to come up. */
   12526 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12527 			delay(10000);
   12528 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12529 				break;
   12530 		}
   12531 
   12532 		DPRINTF(sc, WM_DEBUG_LINK,
   12533 		    ("%s: i = %d after waiting for link\n",
         			device_xname(sc->sc_dev), i));
   12534 
   12535 		status = CSR_READ(sc, WMREG_STATUS);
   12536 		DPRINTF(sc, WM_DEBUG_LINK,
   12537 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12538 			device_xname(sc->sc_dev), status, STATUS_LU));
   12539 		if (status & STATUS_LU) {
   12540 			/* Link is up. */
   12541 			DPRINTF(sc, WM_DEBUG_LINK,
   12542 			    ("%s: LINK: set media -> link up %s\n",
   12543 				device_xname(sc->sc_dev),
   12544 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12545 
   12546 			/*
   12547 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12548 			 * so we should update sc->sc_ctrl
   12549 			 */
   12550 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12551 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12552 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12553 			if (status & STATUS_FD)
   12554 				sc->sc_tctl |=
   12555 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12556 			else
   12557 				sc->sc_tctl |=
   12558 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12559 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12560 				sc->sc_fcrtl |= FCRTL_XONE;
   12561 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12562 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12563 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12564 			sc->sc_tbi_linkup = 1;
   12565 		} else {
   12566 			if (i == WM_LINKUP_TIMEOUT)
   12567 				wm_check_for_link(sc);
   12568 			/* Link is down. */
   12569 			DPRINTF(sc, WM_DEBUG_LINK,
   12570 			    ("%s: LINK: set media -> link down\n",
   12571 				device_xname(sc->sc_dev)));
   12572 			sc->sc_tbi_linkup = 0;
   12573 		}
   12574 	} else {
   12575 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12576 			device_xname(sc->sc_dev)));
   12577 		sc->sc_tbi_linkup = 0;
   12578 	}
   12579 
   12580 	wm_tbi_serdes_set_linkled(sc);
   12581 
   12582 	return 0;
   12583 }
   12584 
   12585 /*
   12586  * wm_tbi_mediastatus:	[ifmedia interface function]
   12587  *
   12588  *	Get the current interface media status on a 1000BASE-X device.
   12589  */
   12590 static void
   12591 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12592 {
   12593 	struct wm_softc *sc = ifp->if_softc;
   12594 	uint32_t ctrl, status;
   12595 
   12596 	ifmr->ifm_status = IFM_AVALID;
   12597 	ifmr->ifm_active = IFM_ETHER;
   12598 
   12599 	status = CSR_READ(sc, WMREG_STATUS);
   12600 	if ((status & STATUS_LU) == 0) {
   12601 		ifmr->ifm_active |= IFM_NONE;
   12602 		return;
   12603 	}
   12604 
   12605 	ifmr->ifm_status |= IFM_ACTIVE;
   12606 	/* Only 82545 is LX */
   12607 	if (sc->sc_type == WM_T_82545)
   12608 		ifmr->ifm_active |= IFM_1000_LX;
   12609 	else
   12610 		ifmr->ifm_active |= IFM_1000_SX;
   12611 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12612 		ifmr->ifm_active |= IFM_FDX;
   12613 	else
   12614 		ifmr->ifm_active |= IFM_HDX;
   12615 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12616 	if (ctrl & CTRL_RFCE)
   12617 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12618 	if (ctrl & CTRL_TFCE)
   12619 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12620 }
   12621 
   12622 /* XXX TBI only */
   12623 static int
   12624 wm_check_for_link(struct wm_softc *sc)
   12625 {
   12626 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12627 	uint32_t rxcw;
   12628 	uint32_t ctrl;
   12629 	uint32_t status;
   12630 	bool signal;
   12631 
   12632 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12633 		device_xname(sc->sc_dev), __func__));
   12634 
   12635 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12636 		/* XXX need some work for >= 82571 */
   12637 		if (sc->sc_type >= WM_T_82571) {
   12638 			sc->sc_tbi_linkup = 1;
   12639 			return 0;
   12640 		}
   12641 	}
   12642 
   12643 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12644 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12645 	status = CSR_READ(sc, WMREG_STATUS);
   12646 	signal = wm_tbi_havesignal(sc, ctrl);
   12647 
   12648 	DPRINTF(sc, WM_DEBUG_LINK,
   12649 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12650 		device_xname(sc->sc_dev), __func__, signal,
   12651 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12652 
   12653 	/*
   12654 	 * SWDPIN   LU RXCW
   12655 	 *	0    0	  0
   12656 	 *	0    0	  1	(should not happen)
   12657 	 *	0    1	  0	(should not happen)
   12658 	 *	0    1	  1	(should not happen)
   12659 	 *	1    0	  0	Disable autonego and force linkup
   12660 	 *	1    0	  1	got /C/ but not linkup yet
   12661 	 *	1    1	  0	(linkup)
   12662 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12663 	 *
   12664 	 */
   12665 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12666 		DPRINTF(sc, WM_DEBUG_LINK,
   12667 		    ("%s: %s: force linkup and fullduplex\n",
   12668 			device_xname(sc->sc_dev), __func__));
   12669 		sc->sc_tbi_linkup = 0;
   12670 		/* Disable auto-negotiation in the TXCW register */
   12671 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12672 
   12673 		/*
   12674 		 * Force link-up and also force full-duplex.
   12675 		 *
   12676 		 * NOTE: CTRL will update TFCE and RFCE automatically,
   12677 		 * so we should update sc->sc_ctrl
   12678 		 */
   12679 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12680 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12681 	} else if (((status & STATUS_LU) != 0)
   12682 	    && ((rxcw & RXCW_C) != 0)
   12683 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12684 		sc->sc_tbi_linkup = 1;
   12685 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12686 			device_xname(sc->sc_dev),
   12687 			__func__));
   12688 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12689 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12690 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12691 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12692 			device_xname(sc->sc_dev), __func__));
   12693 	} else {
   12694 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12695 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12696 			status));
   12697 	}
   12698 
   12699 	return 0;
   12700 }
   12701 
   12702 /*
   12703  * wm_tbi_tick:
   12704  *
   12705  *	Check the link on TBI devices.
   12706  *	This function acts as mii_tick().
   12707  */
   12708 static void
   12709 wm_tbi_tick(struct wm_softc *sc)
   12710 {
   12711 	struct mii_data *mii = &sc->sc_mii;
   12712 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12713 	uint32_t status;
   12714 
   12715 	KASSERT(WM_CORE_LOCKED(sc));
   12716 
   12717 	status = CSR_READ(sc, WMREG_STATUS);
   12718 
   12719 	/* XXX is this needed? */
   12720 	(void)CSR_READ(sc, WMREG_RXCW);
   12721 	(void)CSR_READ(sc, WMREG_CTRL);
   12722 
   12723 	/* set link status */
   12724 	if ((status & STATUS_LU) == 0) {
   12725 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12726 			device_xname(sc->sc_dev)));
   12727 		sc->sc_tbi_linkup = 0;
   12728 	} else if (sc->sc_tbi_linkup == 0) {
   12729 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12730 			device_xname(sc->sc_dev),
   12731 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12732 		sc->sc_tbi_linkup = 1;
   12733 		sc->sc_tbi_serdes_ticks = 0;
   12734 	}
   12735 
   12736 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12737 		goto setled;
   12738 
   12739 	if ((status & STATUS_LU) == 0) {
   12740 		sc->sc_tbi_linkup = 0;
   12741 		/* If the timer expired, retry autonegotiation */
   12742 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12743 		    && (++sc->sc_tbi_serdes_ticks
   12744 			>= sc->sc_tbi_serdes_anegticks)) {
   12745 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12746 				device_xname(sc->sc_dev), __func__));
   12747 			sc->sc_tbi_serdes_ticks = 0;
   12748 			/*
   12749 			 * Reset the link, and let autonegotiation do
   12750 			 * its thing
   12751 			 */
   12752 			sc->sc_ctrl |= CTRL_LRST;
   12753 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12754 			CSR_WRITE_FLUSH(sc);
   12755 			delay(1000);
   12756 			sc->sc_ctrl &= ~CTRL_LRST;
   12757 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12758 			CSR_WRITE_FLUSH(sc);
   12759 			delay(1000);
   12760 			CSR_WRITE(sc, WMREG_TXCW,
   12761 			    sc->sc_txcw & ~TXCW_ANE);
   12762 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12763 		}
   12764 	}
   12765 
   12766 setled:
   12767 	wm_tbi_serdes_set_linkled(sc);
   12768 }
   12769 
   12770 /* SERDES related */
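
/*
 * wm_serdes_power_up_link_82575:
 *
 *	Enable the PCS and power up the SERDES/SFP laser on 82575 and
 *	newer devices.
 */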
   12771 static void
   12772 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12773 {
   12774 	uint32_t reg;
   12775 
   12776 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12777 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12778 		return;
   12779 
   12780 	/* Enable PCS to turn on link */
   12781 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12782 	reg |= PCS_CFG_PCS_EN;
   12783 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12784 
   12785 	/* Power up the laser */
   12786 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12787 	reg &= ~CTRL_EXT_SWDPIN(3);
   12788 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12789 
   12790 	/* Flush the write to verify completion */
   12791 	CSR_WRITE_FLUSH(sc);
   12792 	delay(1000);
   12793 }
   12794 
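/*
 * wm_serdes_mediachange:
 *
 *	Set hardware to newly-selected media on SERDES devices.  Configure
 *	the PCS for either autonegotiation or forced 1000/full depending
 *	on the link mode.
 */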
   12795 static int
   12796 wm_serdes_mediachange(struct ifnet *ifp)
   12797 {
   12798 	struct wm_softc *sc = ifp->if_softc;
   12799 	bool pcs_autoneg = true; /* XXX */
   12800 	uint32_t ctrl_ext, pcs_lctl, reg;
   12801 
   12802 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12803 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12804 		return 0;
   12805 
   12806 	/* XXX Currently, this function is not called on 8257[12] */
   12807 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12808 	    || (sc->sc_type >= WM_T_82575))
   12809 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12810 
	/* Power on the SFP cage, if present */
   12812 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12813 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12814 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12815 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12816 
   12817 	sc->sc_ctrl |= CTRL_SLU;
   12818 
   12819 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12820 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12821 
   12822 		reg = CSR_READ(sc, WMREG_CONNSW);
   12823 		reg |= CONNSW_ENRGSRC;
   12824 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12825 	}
   12826 
   12827 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12828 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12829 	case CTRL_EXT_LINK_MODE_SGMII:
		/* SGMII mode lets the PHY handle forcing speed/duplex */
		pcs_autoneg = true;
		/* The autoneg timeout should be disabled in SGMII mode */
		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12834 		break;
   12835 	case CTRL_EXT_LINK_MODE_1000KX:
   12836 		pcs_autoneg = false;
   12837 		/* FALLTHROUGH */
   12838 	default:
   12839 		if ((sc->sc_type == WM_T_82575)
   12840 		    || (sc->sc_type == WM_T_82576)) {
   12841 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12842 				pcs_autoneg = false;
   12843 		}
   12844 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12845 		    | CTRL_FRCFDX;
   12846 
   12847 		/* Set speed of 1000/Full if speed/duplex is forced */
   12848 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12849 	}
   12850 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12851 
   12852 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12853 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12854 
   12855 	if (pcs_autoneg) {
   12856 		/* Set PCS register for autoneg */
   12857 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12858 
   12859 		/* Disable force flow control for autoneg */
   12860 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12861 
   12862 		/* Configure flow control advertisement for autoneg */
   12863 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12864 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12865 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12866 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12867 	} else
   12868 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12869 
   12870 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12871 
   12872 	return 0;
   12873 }
   12874 
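/*
 * wm_serdes_mediastatus:
 *
 *	Get the current interface media status from the PCS: link, speed,
 *	duplex and the resolved flow control settings.
 */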
   12875 static void
   12876 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12877 {
   12878 	struct wm_softc *sc = ifp->if_softc;
   12879 	struct mii_data *mii = &sc->sc_mii;
   12880 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12881 	uint32_t pcs_adv, pcs_lpab, reg;
   12882 
   12883 	ifmr->ifm_status = IFM_AVALID;
   12884 	ifmr->ifm_active = IFM_ETHER;
   12885 
   12886 	/* Check PCS */
   12887 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12888 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12889 		ifmr->ifm_active |= IFM_NONE;
   12890 		sc->sc_tbi_linkup = 0;
   12891 		goto setled;
   12892 	}
   12893 
   12894 	sc->sc_tbi_linkup = 1;
   12895 	ifmr->ifm_status |= IFM_ACTIVE;
   12896 	if (sc->sc_type == WM_T_I354) {
   12897 		uint32_t status;
   12898 
   12899 		status = CSR_READ(sc, WMREG_STATUS);
   12900 		if (((status & STATUS_2P5_SKU) != 0)
   12901 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12902 			ifmr->ifm_active |= IFM_2500_KX;
   12903 		} else
   12904 			ifmr->ifm_active |= IFM_1000_KX;
   12905 	} else {
   12906 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12907 		case PCS_LSTS_SPEED_10:
   12908 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12909 			break;
   12910 		case PCS_LSTS_SPEED_100:
   12911 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12912 			break;
   12913 		case PCS_LSTS_SPEED_1000:
   12914 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12915 			break;
   12916 		default:
   12917 			device_printf(sc->sc_dev, "Unknown speed\n");
   12918 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12919 			break;
   12920 		}
   12921 	}
   12922 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12923 	if ((reg & PCS_LSTS_FDX) != 0)
   12924 		ifmr->ifm_active |= IFM_FDX;
   12925 	else
   12926 		ifmr->ifm_active |= IFM_HDX;
   12927 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12928 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12929 		/* Check flow */
   12930 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12931 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12932 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12933 			goto setled;
   12934 		}
   12935 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12936 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12937 		DPRINTF(sc, WM_DEBUG_LINK,
   12938 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12939 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12940 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12941 			mii->mii_media_active |= IFM_FLOW
   12942 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12943 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12944 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12945 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12946 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12947 			mii->mii_media_active |= IFM_FLOW
   12948 			    | IFM_ETH_TXPAUSE;
   12949 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12950 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12951 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12952 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12953 			mii->mii_media_active |= IFM_FLOW
   12954 			    | IFM_ETH_RXPAUSE;
   12955 		}
   12956 	}
   12957 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12958 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12959 setled:
   12960 	wm_tbi_serdes_set_linkled(sc);
   12961 }
   12962 
   12963 /*
   12964  * wm_serdes_tick:
   12965  *
   12966  *	Check the link on serdes devices.
   12967  */
   12968 static void
   12969 wm_serdes_tick(struct wm_softc *sc)
   12970 {
   12971 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12972 	struct mii_data *mii = &sc->sc_mii;
   12973 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12974 	uint32_t reg;
   12975 
   12976 	KASSERT(WM_CORE_LOCKED(sc));
   12977 
   12978 	mii->mii_media_status = IFM_AVALID;
   12979 	mii->mii_media_active = IFM_ETHER;
   12980 
   12981 	/* Check PCS */
   12982 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12983 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12984 		mii->mii_media_status |= IFM_ACTIVE;
   12985 		sc->sc_tbi_linkup = 1;
   12986 		sc->sc_tbi_serdes_ticks = 0;
   12987 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12988 		if ((reg & PCS_LSTS_FDX) != 0)
   12989 			mii->mii_media_active |= IFM_FDX;
   12990 		else
   12991 			mii->mii_media_active |= IFM_HDX;
   12992 	} else {
   12993 		mii->mii_media_status |= IFM_NONE;
   12994 		sc->sc_tbi_linkup = 0;
   12995 		/* If the timer expired, retry autonegotiation */
   12996 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12997 		    && (++sc->sc_tbi_serdes_ticks
   12998 			>= sc->sc_tbi_serdes_anegticks)) {
   12999 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13000 				device_xname(sc->sc_dev), __func__));
   13001 			sc->sc_tbi_serdes_ticks = 0;
   13002 			/* XXX */
   13003 			wm_serdes_mediachange(ifp);
   13004 		}
   13005 	}
   13006 
   13007 	wm_tbi_serdes_set_linkled(sc);
   13008 }
   13009 
   13010 /* SFP related */
   13011 
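/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from the SFP module's ROM through the I2CCMD
 *	register, polling for completion.
 */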
   13012 static int
   13013 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13014 {
   13015 	uint32_t i2ccmd;
   13016 	int i;
   13017 
   13018 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13019 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13020 
   13021 	/* Poll the ready bit */
   13022 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13023 		delay(50);
   13024 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13025 		if (i2ccmd & I2CCMD_READY)
   13026 			break;
   13027 	}
   13028 	if ((i2ccmd & I2CCMD_READY) == 0)
   13029 		return -1;
   13030 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13031 		return -1;
   13032 
   13033 	*data = i2ccmd & 0x00ff;
   13034 
   13035 	return 0;
   13036 }
   13037 
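/*
 * wm_sfp_get_media_type:
 *
 *	Identify the SFP module type and derive the media type from the
 *	module's Ethernet compliance flags.
 */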
   13038 static uint32_t
   13039 wm_sfp_get_media_type(struct wm_softc *sc)
   13040 {
   13041 	uint32_t ctrl_ext;
   13042 	uint8_t val = 0;
   13043 	int timeout = 3;
   13044 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13045 	int rv = -1;
   13046 
   13047 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13048 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13049 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13050 	CSR_WRITE_FLUSH(sc);
   13051 
   13052 	/* Read SFP module data */
   13053 	while (timeout) {
   13054 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13055 		if (rv == 0)
   13056 			break;
   13057 		delay(100*1000); /* XXX too big */
   13058 		timeout--;
   13059 	}
   13060 	if (rv != 0)
   13061 		goto out;
   13062 
   13063 	switch (val) {
   13064 	case SFF_SFP_ID_SFF:
   13065 		aprint_normal_dev(sc->sc_dev,
   13066 		    "Module/Connector soldered to board\n");
   13067 		break;
   13068 	case SFF_SFP_ID_SFP:
   13069 		sc->sc_flags |= WM_F_SFP;
   13070 		break;
   13071 	case SFF_SFP_ID_UNKNOWN:
   13072 		goto out;
   13073 	default:
   13074 		break;
   13075 	}
   13076 
   13077 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13078 	if (rv != 0)
   13079 		goto out;
   13080 
   13081 	sc->sc_sfptype = val;
   13082 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13083 		mediatype = WM_MEDIATYPE_SERDES;
   13084 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13085 		sc->sc_flags |= WM_F_SGMII;
   13086 		mediatype = WM_MEDIATYPE_COPPER;
   13087 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13088 		sc->sc_flags |= WM_F_SGMII;
   13089 		mediatype = WM_MEDIATYPE_SERDES;
   13090 	} else {
   13091 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13092 		    __func__, sc->sc_sfptype);
   13093 		sc->sc_sfptype = 0; /* XXX unknown */
   13094 	}
   13095 
   13096 out:
   13097 	/* Restore I2C interface setting */
   13098 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13099 
   13100 	return mediatype;
   13101 }
   13102 
   13103 /*
   13104  * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
   13106  */
   13107 
/* Common to both SPI and Microwire */
   13109 
   13110 /*
   13111  * wm_eeprom_sendbits:
   13112  *
   13113  *	Send a series of bits to the EEPROM.
   13114  */
   13115 static void
   13116 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13117 {
   13118 	uint32_t reg;
   13119 	int x;
   13120 
   13121 	reg = CSR_READ(sc, WMREG_EECD);
   13122 
   13123 	for (x = nbits; x > 0; x--) {
   13124 		if (bits & (1U << (x - 1)))
   13125 			reg |= EECD_DI;
   13126 		else
   13127 			reg &= ~EECD_DI;
   13128 		CSR_WRITE(sc, WMREG_EECD, reg);
   13129 		CSR_WRITE_FLUSH(sc);
   13130 		delay(2);
   13131 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13132 		CSR_WRITE_FLUSH(sc);
   13133 		delay(2);
   13134 		CSR_WRITE(sc, WMREG_EECD, reg);
   13135 		CSR_WRITE_FLUSH(sc);
   13136 		delay(2);
   13137 	}
   13138 }
   13139 
   13140 /*
   13141  * wm_eeprom_recvbits:
   13142  *
   13143  *	Receive a series of bits from the EEPROM.
   13144  */
   13145 static void
   13146 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13147 {
   13148 	uint32_t reg, val;
   13149 	int x;
   13150 
   13151 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13152 
   13153 	val = 0;
   13154 	for (x = nbits; x > 0; x--) {
   13155 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13156 		CSR_WRITE_FLUSH(sc);
   13157 		delay(2);
   13158 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13159 			val |= (1U << (x - 1));
   13160 		CSR_WRITE(sc, WMREG_EECD, reg);
   13161 		CSR_WRITE_FLUSH(sc);
   13162 		delay(2);
   13163 	}
   13164 	*valp = val;
   13165 }
   13166 
   13167 /* Microwire */
   13168 
   13169 /*
   13170  * wm_nvm_read_uwire:
   13171  *
   13172  *	Read a word from the EEPROM using the MicroWire protocol.
   13173  */
   13174 static int
   13175 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13176 {
   13177 	uint32_t reg, val;
   13178 	int i;
   13179 
   13180 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13181 		device_xname(sc->sc_dev), __func__));
   13182 
   13183 	if (sc->nvm.acquire(sc) != 0)
   13184 		return -1;
   13185 
   13186 	for (i = 0; i < wordcnt; i++) {
   13187 		/* Clear SK and DI. */
   13188 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13189 		CSR_WRITE(sc, WMREG_EECD, reg);
   13190 
   13191 		/*
   13192 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13193 		 * and Xen.
   13194 		 *
		 * We use this workaround only for the 82540 because qemu's
		 * e1000 emulation acts as an 82540.
   13197 		 */
   13198 		if (sc->sc_type == WM_T_82540) {
   13199 			reg |= EECD_SK;
   13200 			CSR_WRITE(sc, WMREG_EECD, reg);
   13201 			reg &= ~EECD_SK;
   13202 			CSR_WRITE(sc, WMREG_EECD, reg);
   13203 			CSR_WRITE_FLUSH(sc);
   13204 			delay(2);
   13205 		}
   13206 		/* XXX: end of workaround */
   13207 
   13208 		/* Set CHIP SELECT. */
   13209 		reg |= EECD_CS;
   13210 		CSR_WRITE(sc, WMREG_EECD, reg);
   13211 		CSR_WRITE_FLUSH(sc);
   13212 		delay(2);
   13213 
   13214 		/* Shift in the READ command. */
   13215 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13216 
   13217 		/* Shift in address. */
   13218 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13219 
   13220 		/* Shift out the data. */
   13221 		wm_eeprom_recvbits(sc, &val, 16);
   13222 		data[i] = val & 0xffff;
   13223 
   13224 		/* Clear CHIP SELECT. */
   13225 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13226 		CSR_WRITE(sc, WMREG_EECD, reg);
   13227 		CSR_WRITE_FLUSH(sc);
   13228 		delay(2);
   13229 	}
   13230 
   13231 	sc->nvm.release(sc);
   13232 	return 0;
   13233 }
   13234 
   13235 /* SPI */
   13236 
   13237 /*
   13238  * Set SPI and FLASH related information from the EECD register.
   13239  * For 82541 and 82547, the word size is taken from EEPROM.
   13240  */
   13241 static int
   13242 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13243 {
   13244 	int size;
   13245 	uint32_t reg;
   13246 	uint16_t data;
   13247 
   13248 	reg = CSR_READ(sc, WMREG_EECD);
   13249 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13250 
   13251 	/* Read the size of NVM from EECD by default */
   13252 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13253 	switch (sc->sc_type) {
   13254 	case WM_T_82541:
   13255 	case WM_T_82541_2:
   13256 	case WM_T_82547:
   13257 	case WM_T_82547_2:
   13258 		/* Set dummy value to access EEPROM */
   13259 		sc->sc_nvm_wordsize = 64;
   13260 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13261 			aprint_error_dev(sc->sc_dev,
   13262 			    "%s: failed to read EEPROM size\n", __func__);
   13263 		}
   13264 		reg = data;
   13265 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13266 		if (size == 0)
   13267 			size = 6; /* 64 word size */
   13268 		else
   13269 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13270 		break;
   13271 	case WM_T_80003:
   13272 	case WM_T_82571:
   13273 	case WM_T_82572:
   13274 	case WM_T_82573: /* SPI case */
   13275 	case WM_T_82574: /* SPI case */
   13276 	case WM_T_82583: /* SPI case */
   13277 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13278 		if (size > 14)
   13279 			size = 14;
   13280 		break;
   13281 	case WM_T_82575:
   13282 	case WM_T_82576:
   13283 	case WM_T_82580:
   13284 	case WM_T_I350:
   13285 	case WM_T_I354:
   13286 	case WM_T_I210:
   13287 	case WM_T_I211:
   13288 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13289 		if (size > 15)
   13290 			size = 15;
   13291 		break;
   13292 	default:
   13293 		aprint_error_dev(sc->sc_dev,
   13294 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
   13297 	}
   13298 
   13299 	sc->sc_nvm_wordsize = 1 << size;
   13300 
   13301 	return 0;
   13302 }
   13303 
   13304 /*
   13305  * wm_nvm_ready_spi:
   13306  *
   13307  *	Wait for a SPI EEPROM to be ready for commands.
   13308  */
   13309 static int
   13310 wm_nvm_ready_spi(struct wm_softc *sc)
   13311 {
   13312 	uint32_t val;
   13313 	int usec;
   13314 
   13315 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13316 		device_xname(sc->sc_dev), __func__));
   13317 
   13318 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13319 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13320 		wm_eeprom_recvbits(sc, &val, 8);
   13321 		if ((val & SPI_SR_RDY) == 0)
   13322 			break;
   13323 	}
   13324 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   13326 		return -1;
   13327 	}
   13328 	return 0;
   13329 }
   13330 
   13331 /*
   13332  * wm_nvm_read_spi:
   13333  *
 *	Read a word from the EEPROM using the SPI protocol.
   13335  */
   13336 static int
   13337 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13338 {
   13339 	uint32_t reg, val;
   13340 	int i;
   13341 	uint8_t opc;
   13342 	int rv = 0;
   13343 
   13344 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13345 		device_xname(sc->sc_dev), __func__));
   13346 
   13347 	if (sc->nvm.acquire(sc) != 0)
   13348 		return -1;
   13349 
   13350 	/* Clear SK and CS. */
   13351 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13352 	CSR_WRITE(sc, WMREG_EECD, reg);
   13353 	CSR_WRITE_FLUSH(sc);
   13354 	delay(2);
   13355 
   13356 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13357 		goto out;
   13358 
   13359 	/* Toggle CS to flush commands. */
   13360 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13361 	CSR_WRITE_FLUSH(sc);
   13362 	delay(2);
   13363 	CSR_WRITE(sc, WMREG_EECD, reg);
   13364 	CSR_WRITE_FLUSH(sc);
   13365 	delay(2);
   13366 
   13367 	opc = SPI_OPC_READ;
   13368 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13369 		opc |= SPI_OPC_A8;
   13370 
   13371 	wm_eeprom_sendbits(sc, opc, 8);
   13372 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13373 
   13374 	for (i = 0; i < wordcnt; i++) {
   13375 		wm_eeprom_recvbits(sc, &val, 16);
   13376 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13377 	}
   13378 
   13379 	/* Raise CS and clear SK. */
   13380 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13381 	CSR_WRITE(sc, WMREG_EECD, reg);
   13382 	CSR_WRITE_FLUSH(sc);
   13383 	delay(2);
   13384 
   13385 out:
   13386 	sc->nvm.release(sc);
   13387 	return rv;
   13388 }
   13389 
/* Reading with the EERD register */
   13391 
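/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD or EEWR register for the DONE bit.  Return 0 on
 *	success or -1 on timeout.
 */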
   13392 static int
   13393 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13394 {
   13395 	uint32_t attempts = 100000;
   13396 	uint32_t i, reg = 0;
   13397 	int32_t done = -1;
   13398 
   13399 	for (i = 0; i < attempts; i++) {
   13400 		reg = CSR_READ(sc, rw);
   13401 
   13402 		if (reg & EERD_DONE) {
   13403 			done = 0;
   13404 			break;
   13405 		}
   13406 		delay(5);
   13407 	}
   13408 
   13409 	return done;
   13410 }
   13411 
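/*
 * wm_nvm_read_eerd:
 *
 *	Read a word (or words) from the EEPROM using the EERD register.
 */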
   13412 static int
   13413 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13414 {
   13415 	int i, eerd = 0;
   13416 	int rv = 0;
   13417 
   13418 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13419 		device_xname(sc->sc_dev), __func__));
   13420 
   13421 	if (sc->nvm.acquire(sc) != 0)
   13422 		return -1;
   13423 
   13424 	for (i = 0; i < wordcnt; i++) {
   13425 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13426 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13427 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13428 		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13431 			break;
   13432 		}
   13433 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13434 	}
   13435 
   13436 	sc->nvm.release(sc);
   13437 	return rv;
   13438 }
   13439 
   13440 /* Flash */
   13441 
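/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Detect which flash bank holds the valid NVM image by checking
 *	each bank's signature word.
 */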
   13442 static int
   13443 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13444 {
   13445 	uint32_t eecd;
   13446 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13447 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13448 	uint32_t nvm_dword = 0;
   13449 	uint8_t sig_byte = 0;
   13450 	int rv;
   13451 
   13452 	switch (sc->sc_type) {
   13453 	case WM_T_PCH_SPT:
   13454 	case WM_T_PCH_CNP:
   13455 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13456 		act_offset = ICH_NVM_SIG_WORD * 2;
   13457 
   13458 		/* Set bank to 0 in case flash read fails. */
   13459 		*bank = 0;
   13460 
   13461 		/* Check bank 0 */
   13462 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13463 		if (rv != 0)
   13464 			return rv;
   13465 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13466 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13467 			*bank = 0;
   13468 			return 0;
   13469 		}
   13470 
   13471 		/* Check bank 1 */
   13472 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   13473 		    &nvm_dword);
   13474 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13475 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13476 			*bank = 1;
   13477 			return 0;
   13478 		}
   13479 		aprint_error_dev(sc->sc_dev,
   13480 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13481 		return -1;
   13482 	case WM_T_ICH8:
   13483 	case WM_T_ICH9:
   13484 		eecd = CSR_READ(sc, WMREG_EECD);
   13485 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13486 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13487 			return 0;
   13488 		}
   13489 		/* FALLTHROUGH */
   13490 	default:
   13491 		/* Default to 0 */
   13492 		*bank = 0;
   13493 
   13494 		/* Check bank 0 */
   13495 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13496 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13497 			*bank = 0;
   13498 			return 0;
   13499 		}
   13500 
   13501 		/* Check bank 1 */
   13502 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13503 		    &sig_byte);
   13504 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13505 			*bank = 1;
   13506 			return 0;
   13507 		}
   13508 	}
   13509 
   13510 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13511 		device_xname(sc->sc_dev)));
   13512 	return -1;
   13513 }
   13514 
   13515 /******************************************************************************
   13516  * This function does initial flash setup so that a new read/write/erase cycle
   13517  * can be started.
   13518  *
   13519  * sc - The pointer to the hw structure
   13520  ****************************************************************************/
   13521 static int32_t
   13522 wm_ich8_cycle_init(struct wm_softc *sc)
   13523 {
   13524 	uint16_t hsfsts;
   13525 	int32_t error = 1;
   13526 	int32_t i     = 0;
   13527 
   13528 	if (sc->sc_type >= WM_T_PCH_SPT)
   13529 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13530 	else
   13531 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13532 
	/* Check the Flash Descriptor Valid bit in the HW status register */
   13534 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13535 		return error;
   13536 
	/* Clear FCERR and DAEL in the HW status by writing a 1 to each */
   13539 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13540 
   13541 	if (sc->sc_type >= WM_T_PCH_SPT)
   13542 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13543 	else
   13544 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13545 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which could then be used to tell whether a cycle
	 * is in progress or has been completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit so that accesses to those bits by two
	 * threads are serialized, or some way to keep two threads from
	 * starting a cycle at the same time.
	 */
   13556 
   13557 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13558 		/*
   13559 		 * There is no cycle running at present, so we can start a
   13560 		 * cycle
   13561 		 */
   13562 
   13563 		/* Begin by setting Flash Cycle Done. */
   13564 		hsfsts |= HSFSTS_DONE;
   13565 		if (sc->sc_type >= WM_T_PCH_SPT)
   13566 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13567 			    hsfsts & 0xffffUL);
   13568 		else
   13569 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13570 		error = 0;
   13571 	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
   13576 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13577 			if (sc->sc_type >= WM_T_PCH_SPT)
   13578 				hsfsts = ICH8_FLASH_READ32(sc,
   13579 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13580 			else
   13581 				hsfsts = ICH8_FLASH_READ16(sc,
   13582 				    ICH_FLASH_HSFSTS);
   13583 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13584 				error = 0;
   13585 				break;
   13586 			}
   13587 			delay(1);
   13588 		}
   13589 		if (error == 0) {
			/*
			 * The previous cycle ended within the timeout, so
			 * now set the Flash Cycle Done bit.
			 */
   13594 			hsfsts |= HSFSTS_DONE;
   13595 			if (sc->sc_type >= WM_T_PCH_SPT)
   13596 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13597 				    hsfsts & 0xffffUL);
   13598 			else
   13599 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13600 				    hsfsts);
   13601 		}
   13602 	}
   13603 	return error;
   13604 }
   13605 
   13606 /******************************************************************************
   13607  * This function starts a flash cycle and waits for its completion
   13608  *
   13609  * sc - The pointer to the hw structure
   13610  ****************************************************************************/
   13611 static int32_t
   13612 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13613 {
   13614 	uint16_t hsflctl;
   13615 	uint16_t hsfsts;
   13616 	int32_t error = 1;
   13617 	uint32_t i = 0;
   13618 
   13619 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13620 	if (sc->sc_type >= WM_T_PCH_SPT)
   13621 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13622 	else
   13623 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13624 	hsflctl |= HSFCTL_GO;
   13625 	if (sc->sc_type >= WM_T_PCH_SPT)
   13626 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13627 		    (uint32_t)hsflctl << 16);
   13628 	else
   13629 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13630 
   13631 	/* Wait till FDONE bit is set to 1 */
   13632 	do {
   13633 		if (sc->sc_type >= WM_T_PCH_SPT)
   13634 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13635 			    & 0xffffUL;
   13636 		else
   13637 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13638 		if (hsfsts & HSFSTS_DONE)
   13639 			break;
   13640 		delay(1);
   13641 		i++;
   13642 	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   13644 		error = 0;
   13645 
   13646 	return error;
   13647 }
   13648 
   13649 /******************************************************************************
   13650  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13651  *
   13652  * sc - The pointer to the hw structure
   13653  * index - The index of the byte or word to read.
   13654  * size - Size of data to read, 1=byte 2=word, 4=dword
   13655  * data - Pointer to the word to store the value read.
   13656  *****************************************************************************/
   13657 static int32_t
   13658 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13659     uint32_t size, uint32_t *data)
   13660 {
   13661 	uint16_t hsfsts;
   13662 	uint16_t hsflctl;
   13663 	uint32_t flash_linear_address;
   13664 	uint32_t flash_data = 0;
   13665 	int32_t error = 1;
   13666 	int32_t count = 0;
   13667 
	if (size < 1 || size > 4 || data == NULL ||
   13669 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13670 		return error;
   13671 
   13672 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13673 	    sc->sc_ich8_flash_base;
   13674 
   13675 	do {
   13676 		delay(1);
   13677 		/* Steps */
   13678 		error = wm_ich8_cycle_init(sc);
   13679 		if (error)
   13680 			break;
   13681 
   13682 		if (sc->sc_type >= WM_T_PCH_SPT)
   13683 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13684 			    >> 16;
   13685 		else
   13686 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds size - 1 (0 = 1 byte, 3 = 4 bytes) */
   13688 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13689 		    & HSFCTL_BCOUNT_MASK;
   13690 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13691 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13692 			/*
			 * On SPT, this register is in the LAN memory space,
			 * not flash.  Therefore, only 32-bit access is
			 * supported.
   13695 			 */
   13696 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13697 			    (uint32_t)hsflctl << 16);
   13698 		} else
   13699 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13700 
   13701 		/*
   13702 		 * Write the last 24 bits of index into Flash Linear address
   13703 		 * field in Flash Address
   13704 		 */
		/* TODO: Maybe check the index against the size of the flash */
   13706 
   13707 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13708 
   13709 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13710 
   13711 		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read
		 * the result from Flash Data0, least significant byte
		 * first.
   13716 		 */
   13717 		if (error == 0) {
   13718 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13719 			if (size == 1)
   13720 				*data = (uint8_t)(flash_data & 0x000000FF);
   13721 			else if (size == 2)
   13722 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13723 			else if (size == 4)
   13724 				*data = (uint32_t)flash_data;
   13725 			break;
   13726 		} else {
   13727 			/*
   13728 			 * If we've gotten here, then things are probably
   13729 			 * completely hosed, but if the error condition is
   13730 			 * detected, it won't hurt to give it another try...
   13731 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13732 			 */
   13733 			if (sc->sc_type >= WM_T_PCH_SPT)
   13734 				hsfsts = ICH8_FLASH_READ32(sc,
   13735 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13736 			else
   13737 				hsfsts = ICH8_FLASH_READ16(sc,
   13738 				    ICH_FLASH_HSFSTS);
   13739 
   13740 			if (hsfsts & HSFSTS_ERR) {
   13741 				/* Repeat for some time before giving up. */
   13742 				continue;
   13743 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13744 				break;
   13745 		}
   13746 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13747 
   13748 	return error;
   13749 }
   13750 
   13751 /******************************************************************************
   13752  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13753  *
 * sc - pointer to the wm_softc structure
   13755  * index - The index of the byte to read.
   13756  * data - Pointer to a byte to store the value read.
   13757  *****************************************************************************/
   13758 static int32_t
   13759 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13760 {
   13761 	int32_t status;
   13762 	uint32_t word = 0;
   13763 
   13764 	status = wm_read_ich8_data(sc, index, 1, &word);
   13765 	if (status == 0)
   13766 		*data = (uint8_t)word;
   13767 	else
   13768 		*data = 0;
   13769 
   13770 	return status;
   13771 }
   13772 
   13773 /******************************************************************************
   13774  * Reads a word from the NVM using the ICH8 flash access registers.
   13775  *
 * sc - pointer to the wm_softc structure
   13777  * index - The starting byte index of the word to read.
   13778  * data - Pointer to a word to store the value read.
   13779  *****************************************************************************/
   13780 static int32_t
   13781 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13782 {
   13783 	int32_t status;
   13784 	uint32_t word = 0;
   13785 
   13786 	status = wm_read_ich8_data(sc, index, 2, &word);
   13787 	if (status == 0)
   13788 		*data = (uint16_t)word;
   13789 	else
   13790 		*data = 0;
   13791 
   13792 	return status;
   13793 }
   13794 
   13795 /******************************************************************************
   13796  * Reads a dword from the NVM using the ICH8 flash access registers.
   13797  *
 * sc - pointer to the wm_softc structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   13801  *****************************************************************************/
   13802 static int32_t
   13803 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13804 {
   13805 	int32_t status;
   13806 
   13807 	status = wm_read_ich8_data(sc, index, 4, data);
   13808 	return status;
   13809 }
   13810 
   13811 /******************************************************************************
   13812  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13813  * register.
   13814  *
   13815  * sc - Struct containing variables accessed by shared code
   13816  * offset - offset of word in the EEPROM to read
   13817  * data - word read from the EEPROM
   13818  * words - number of words to read
   13819  *****************************************************************************/
   13820 static int
   13821 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13822 {
   13823 	int32_t	 rv = 0;
   13824 	uint32_t flash_bank = 0;
   13825 	uint32_t act_offset = 0;
   13826 	uint32_t bank_offset = 0;
   13827 	uint16_t word = 0;
   13828 	uint16_t i = 0;
   13829 
   13830 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13831 		device_xname(sc->sc_dev), __func__));
   13832 
   13833 	if (sc->nvm.acquire(sc) != 0)
   13834 		return -1;
   13835 
   13836 	/*
   13837 	 * We need to know which is the valid flash bank.  In the event
   13838 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13839 	 * managing flash_bank. So it cannot be trusted and needs
   13840 	 * to be updated with each read.
   13841 	 */
   13842 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13843 	if (rv) {
   13844 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13845 			device_xname(sc->sc_dev)));
   13846 		flash_bank = 0;
   13847 	}
   13848 
   13849 	/*
	 * Adjust the offset appropriately if we're on bank 1, accounting
	 * for the word size.
   13852 	 */
   13853 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13854 
   13855 	for (i = 0; i < words; i++) {
   13856 		/* The NVM part needs a byte offset, hence * 2 */
   13857 		act_offset = bank_offset + ((offset + i) * 2);
   13858 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13859 		if (rv) {
   13860 			aprint_error_dev(sc->sc_dev,
   13861 			    "%s: failed to read NVM\n", __func__);
   13862 			break;
   13863 		}
   13864 		data[i] = word;
   13865 	}
   13866 
   13867 	sc->nvm.release(sc);
   13868 	return rv;
   13869 }
   13870 
   13871 /******************************************************************************
   13872  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13873  * register.
   13874  *
   13875  * sc - Struct containing variables accessed by shared code
   13876  * offset - offset of word in the EEPROM to read
   13877  * data - word read from the EEPROM
   13878  * words - number of words to read
   13879  *****************************************************************************/
   13880 static int
   13881 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13882 {
   13883 	int32_t	 rv = 0;
   13884 	uint32_t flash_bank = 0;
   13885 	uint32_t act_offset = 0;
   13886 	uint32_t bank_offset = 0;
   13887 	uint32_t dword = 0;
   13888 	uint16_t i = 0;
   13889 
   13890 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13891 		device_xname(sc->sc_dev), __func__));
   13892 
   13893 	if (sc->nvm.acquire(sc) != 0)
   13894 		return -1;
   13895 
   13896 	/*
   13897 	 * We need to know which is the valid flash bank.  In the event
   13898 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13899 	 * managing flash_bank. So it cannot be trusted and needs
   13900 	 * to be updated with each read.
   13901 	 */
   13902 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13903 	if (rv) {
   13904 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13905 			device_xname(sc->sc_dev)));
   13906 		flash_bank = 0;
   13907 	}
   13908 
   13909 	/*
	 * Adjust the offset appropriately if we're on bank 1, accounting
	 * for the word size.
   13912 	 */
   13913 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13914 
   13915 	for (i = 0; i < words; i++) {
   13916 		/* The NVM part needs a byte offset, hence * 2 */
   13917 		act_offset = bank_offset + ((offset + i) * 2);
   13918 		/* but we must read dword aligned, so mask ... */
   13919 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13920 		if (rv) {
   13921 			aprint_error_dev(sc->sc_dev,
   13922 			    "%s: failed to read NVM\n", __func__);
   13923 			break;
   13924 		}
   13925 		/* ... and pick out low or high word */
   13926 		if ((act_offset & 0x2) == 0)
   13927 			data[i] = (uint16_t)(dword & 0xFFFF);
   13928 		else
   13929 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13930 	}
   13931 
   13932 	sc->nvm.release(sc);
   13933 	return rv;
   13934 }
   13935 
   13936 /* iNVM */
   13937 
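/*
 * wm_nvm_read_word_invm:
 *
 *	Read one word from the iNVM (integrated NVM) by scanning its
 *	autoload records.
 */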
   13938 static int
   13939 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13940 {
	int32_t	 rv = -1;
   13942 	uint32_t invm_dword;
   13943 	uint16_t i;
   13944 	uint8_t record_type, word_address;
   13945 
   13946 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13947 		device_xname(sc->sc_dev), __func__));
   13948 
   13949 	for (i = 0; i < INVM_SIZE; i++) {
   13950 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13951 		/* Get record type */
   13952 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13953 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13954 			break;
   13955 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13956 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13957 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13958 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13959 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13960 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13961 			if (word_address == address) {
   13962 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13963 				rv = 0;
   13964 				break;
   13965 			}
   13966 		}
   13967 	}
   13968 
   13969 	return rv;
   13970 }
   13971 
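/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM, substituting documented default values
 *	for words which are not programmed.
 */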
   13972 static int
   13973 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13974 {
   13975 	int rv = 0;
   13976 	int i;
   13977 
   13978 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13979 		device_xname(sc->sc_dev), __func__));
   13980 
   13981 	if (sc->nvm.acquire(sc) != 0)
   13982 		return -1;
   13983 
   13984 	for (i = 0; i < words; i++) {
   13985 		switch (offset + i) {
   13986 		case NVM_OFF_MACADDR:
   13987 		case NVM_OFF_MACADDR1:
   13988 		case NVM_OFF_MACADDR2:
   13989 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13990 			if (rv != 0) {
   13991 				data[i] = 0xffff;
   13992 				rv = -1;
   13993 			}
   13994 			break;
   13995 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   13996 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13997 			if (rv != 0) {
   13998 				*data = INVM_DEFAULT_AL;
   13999 				rv = 0;
   14000 			}
   14001 			break;
   14002 		case NVM_OFF_CFG2:
   14003 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14004 			if (rv != 0) {
   14005 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14006 				rv = 0;
   14007 			}
   14008 			break;
   14009 		case NVM_OFF_CFG4:
   14010 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14011 			if (rv != 0) {
   14012 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14013 				rv = 0;
   14014 			}
   14015 			break;
   14016 		case NVM_OFF_LED_1_CFG:
   14017 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14018 			if (rv != 0) {
   14019 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14020 				rv = 0;
   14021 			}
   14022 			break;
   14023 		case NVM_OFF_LED_0_2_CFG:
   14024 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14025 			if (rv != 0) {
   14026 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14027 				rv = 0;
   14028 			}
   14029 			break;
   14030 		case NVM_OFF_ID_LED_SETTINGS:
   14031 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14032 			if (rv != 0) {
   14033 				*data = ID_LED_RESERVED_FFFF;
   14034 				rv = 0;
   14035 			}
   14036 			break;
   14037 		default:
   14038 			DPRINTF(sc, WM_DEBUG_NVM,
   14039 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14040 			*data = NVM_RESERVED_WORD;
   14041 			break;
   14042 		}
   14043 	}
   14044 
   14045 	sc->nvm.release(sc);
   14046 	return rv;
   14047 }
   14048 
/* Locking, NVM type detection, checksum validation, version and read */
   14050 
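/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Check whether the NVM is an on-board EEPROM.  On 82573, 82574 and
 *	82583, both of EECD bits 15 and 16 being set indicates a Flash
 *	part instead.
 */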
   14051 static int
   14052 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14053 {
   14054 	uint32_t eecd = 0;
   14055 
   14056 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14057 	    || sc->sc_type == WM_T_82583) {
   14058 		eecd = CSR_READ(sc, WMREG_EECD);
   14059 
   14060 		/* Isolate bits 15 & 16 */
   14061 		eecd = ((eecd >> 15) & 0x03);
   14062 
   14063 		/* If both bits are set, device is Flash type */
   14064 		if (eecd == 0x03)
   14065 			return 0;
   14066 	}
   14067 	return 1;
   14068 }
   14069 
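/*
 * wm_nvm_flash_presence_i210:
 *
 *	Check whether an external flash is present on I21[01] devices.
 */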
   14070 static int
   14071 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14072 {
   14073 	uint32_t eec;
   14074 
   14075 	eec = CSR_READ(sc, WMREG_EEC);
   14076 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14077 		return 1;
   14078 
   14079 	return 0;
   14080 }
   14081 
   14082 /*
   14083  * wm_nvm_validate_checksum
   14084  *
 * The checksum is defined as the sum of the first 64 (16 bit) words,
 * which should equal NVM_CHECKSUM.
   14086  */
   14087 static int
   14088 wm_nvm_validate_checksum(struct wm_softc *sc)
   14089 {
   14090 	uint16_t checksum;
   14091 	uint16_t eeprom_data;
   14092 #ifdef WM_DEBUG
   14093 	uint16_t csum_wordaddr, valid_checksum;
   14094 #endif
   14095 	int i;
   14096 
   14097 	checksum = 0;
   14098 
   14099 	/* Don't check for I211 */
   14100 	if (sc->sc_type == WM_T_I211)
   14101 		return 0;
   14102 
   14103 #ifdef WM_DEBUG
   14104 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14105 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14106 		csum_wordaddr = NVM_OFF_COMPAT;
   14107 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14108 	} else {
   14109 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14110 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14111 	}
   14112 
   14113 	/* Dump EEPROM image for debug */
   14114 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14115 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14116 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14117 		/* XXX PCH_SPT? */
   14118 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14119 		if ((eeprom_data & valid_checksum) == 0)
   14120 			DPRINTF(sc, WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   14122 				device_xname(sc->sc_dev), eeprom_data,
   14123 				    valid_checksum));
   14124 	}
   14125 
   14126 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14127 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14128 		for (i = 0; i < NVM_SIZE; i++) {
   14129 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14130 				printf("XXXX ");
   14131 			else
   14132 				printf("%04hx ", eeprom_data);
   14133 			if (i % 8 == 7)
   14134 				printf("\n");
   14135 		}
   14136 	}
   14137 
   14138 #endif /* WM_DEBUG */
   14139 
   14140 	for (i = 0; i < NVM_SIZE; i++) {
   14141 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14142 			return 1;
   14143 		checksum += eeprom_data;
   14144 	}
   14145 
   14146 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14147 #ifdef WM_DEBUG
   14148 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14149 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14150 #endif
   14151 	}
   14152 
   14153 	return 0;
   14154 }
   14155 
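/*
 * wm_nvm_version_invm:
 *
 *	Decode the iNVM image version from iNVM word 61.
 */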
   14156 static void
   14157 wm_nvm_version_invm(struct wm_softc *sc)
   14158 {
   14159 	uint32_t dword;
   14160 
   14161 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes.  It may not be perfect, though...
   14165 	 *
   14166 	 * Example:
   14167 	 *
   14168 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14169 	 */
   14170 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14171 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14172 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14173 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14174 }
   14175 
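/*
 * wm_nvm_version:
 *
 *	Decode and print the NVM image version and, where present, the
 *	option ROM version and the image unique ID.
 */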
   14176 static void
   14177 wm_nvm_version(struct wm_softc *sc)
   14178 {
   14179 	uint16_t major, minor, build, patch;
   14180 	uint16_t uid0, uid1;
   14181 	uint16_t nvm_data;
   14182 	uint16_t off;
   14183 	bool check_version = false;
   14184 	bool check_optionrom = false;
   14185 	bool have_build = false;
   14186 	bool have_uid = true;
   14187 
   14188 	/*
   14189 	 * Version format:
   14190 	 *
   14191 	 * XYYZ
   14192 	 * X0YZ
   14193 	 * X0YY
   14194 	 *
   14195 	 * Example:
   14196 	 *
   14197 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14198 	 *	82571	0x50a6	5.10.6?
   14199 	 *	82572	0x506a	5.6.10?
   14200 	 *	82572EI	0x5069	5.6.9?
   14201 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14202 	 *		0x2013	2.1.3?
   14203 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14204 	 * ICH8+82567	0x0040	0.4.0?
   14205 	 * ICH9+82566	0x1040	1.4.0?
   14206 	 *ICH10+82567	0x0043	0.4.3?
   14207 	 *  PCH+82577	0x00c1	0.12.1?
   14208 	 * PCH2+82579	0x00d3	0.13.3?
   14209 	 *		0x00d4	0.13.4?
   14210 	 *  LPT+I218	0x0023	0.2.3?
   14211 	 *  SPT+I219	0x0084	0.8.4?
   14212 	 *  CNP+I219	0x0054	0.5.4?
   14213 	 */
   14214 
   14215 	/*
   14216 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   14219 	 */
   14220 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14221 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14222 		have_uid = false;
   14223 
   14224 	switch (sc->sc_type) {
   14225 	case WM_T_82571:
   14226 	case WM_T_82572:
   14227 	case WM_T_82574:
   14228 	case WM_T_82583:
   14229 		check_version = true;
   14230 		check_optionrom = true;
   14231 		have_build = true;
   14232 		break;
   14233 	case WM_T_ICH8:
   14234 	case WM_T_ICH9:
   14235 	case WM_T_ICH10:
   14236 	case WM_T_PCH:
   14237 	case WM_T_PCH2:
   14238 	case WM_T_PCH_LPT:
   14239 	case WM_T_PCH_SPT:
   14240 	case WM_T_PCH_CNP:
   14241 		check_version = true;
   14242 		have_build = true;
   14243 		have_uid = false;
   14244 		break;
   14245 	case WM_T_82575:
   14246 	case WM_T_82576:
   14247 	case WM_T_82580:
   14248 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14249 			check_version = true;
   14250 		break;
   14251 	case WM_T_I211:
   14252 		wm_nvm_version_invm(sc);
   14253 		have_uid = false;
   14254 		goto printver;
   14255 	case WM_T_I210:
   14256 		if (!wm_nvm_flash_presence_i210(sc)) {
   14257 			wm_nvm_version_invm(sc);
   14258 			have_uid = false;
   14259 			goto printver;
   14260 		}
   14261 		/* FALLTHROUGH */
   14262 	case WM_T_I350:
   14263 	case WM_T_I354:
   14264 		check_version = true;
   14265 		check_optionrom = true;
   14266 		break;
   14267 	default:
   14268 		return;
   14269 	}
   14270 	if (check_version
   14271 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14272 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14273 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14274 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14275 			build = nvm_data & NVM_BUILD_MASK;
   14276 			have_build = true;
   14277 		} else
   14278 			minor = nvm_data & 0x00ff;
   14279 
   14280 		/* Decimal */
   14281 		minor = (minor / 16) * 10 + (minor % 16);
   14282 		sc->sc_nvm_ver_major = major;
   14283 		sc->sc_nvm_ver_minor = minor;
   14284 
   14285 printver:
   14286 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14287 		    sc->sc_nvm_ver_minor);
   14288 		if (have_build) {
   14289 			sc->sc_nvm_ver_build = build;
   14290 			aprint_verbose(".%d", build);
   14291 		}
   14292 	}
   14293 
	/* Assume the Option ROM area is above NVM_SIZE */
   14295 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14296 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14297 		/* Option ROM Version */
   14298 		if ((off != 0x0000) && (off != 0xffff)) {
   14299 			int rv;
   14300 
   14301 			off += NVM_COMBO_VER_OFF;
   14302 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14303 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14304 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14305 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14306 				/* 16bits */
   14307 				major = uid0 >> 8;
   14308 				build = (uid0 << 8) | (uid1 >> 8);
   14309 				patch = uid1 & 0x00ff;
   14310 				aprint_verbose(", option ROM Version %d.%d.%d",
   14311 				    major, build, patch);
   14312 			}
   14313 		}
   14314 	}
   14315 
   14316 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14317 		aprint_verbose(", Image Unique ID %08x",
   14318 		    ((uint32_t)uid1 << 16) | uid0);
   14319 }
   14320 
   14321 /*
   14322  * wm_nvm_read:
   14323  *
   14324  *	Read data from the serial EEPROM.
   14325  */
   14326 static int
   14327 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14328 {
   14329 	int rv;
   14330 
   14331 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14332 		device_xname(sc->sc_dev), __func__));
   14333 
   14334 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14335 		return -1;
   14336 
   14337 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14338 
   14339 	return rv;
   14340 }
   14341 
   14342 /*
   14343  * Hardware semaphores.
 * Very complex...
   14345  */
   14346 
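/*
 * wm_get_null / wm_put_null:
 *
 *	Dummy acquire/release functions for devices which require no
 *	locking.
 */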
   14347 static int
   14348 wm_get_null(struct wm_softc *sc)
   14349 {
   14350 
   14351 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14352 		device_xname(sc->sc_dev), __func__));
   14353 	return 0;
   14354 }
   14355 
   14356 static void
   14357 wm_put_null(struct wm_softc *sc)
   14358 {
   14359 
   14360 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14361 		device_xname(sc->sc_dev), __func__));
   14362 	return;
   14363 }
   14364 
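/*
 * wm_get_eecd:
 *
 *	Request direct EEPROM access through the EECD register and wait
 *	for the grant.
 */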
   14365 static int
   14366 wm_get_eecd(struct wm_softc *sc)
   14367 {
   14368 	uint32_t reg;
   14369 	int x;
   14370 
   14371 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14372 		device_xname(sc->sc_dev), __func__));
   14373 
   14374 	reg = CSR_READ(sc, WMREG_EECD);
   14375 
   14376 	/* Request EEPROM access. */
   14377 	reg |= EECD_EE_REQ;
   14378 	CSR_WRITE(sc, WMREG_EECD, reg);
   14379 
	/* ...and wait for it to be granted. */
   14381 	for (x = 0; x < 1000; x++) {
   14382 		reg = CSR_READ(sc, WMREG_EECD);
   14383 		if (reg & EECD_EE_GNT)
   14384 			break;
   14385 		delay(5);
   14386 	}
   14387 	if ((reg & EECD_EE_GNT) == 0) {
   14388 		aprint_error_dev(sc->sc_dev,
   14389 		    "could not acquire EEPROM GNT\n");
   14390 		reg &= ~EECD_EE_REQ;
   14391 		CSR_WRITE(sc, WMREG_EECD, reg);
   14392 		return -1;
   14393 	}
   14394 
   14395 	return 0;
   14396 }
   14397 
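/*
 * wm_nvm_eec_clock_raise / wm_nvm_eec_clock_lower:
 *
 *	Toggle the EEPROM clock (SK) line.  SPI parts need a much shorter
 *	delay than Microwire parts.
 */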
   14398 static void
   14399 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14400 {
   14401 
   14402 	*eecd |= EECD_SK;
   14403 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14404 	CSR_WRITE_FLUSH(sc);
   14405 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14406 		delay(1);
   14407 	else
   14408 		delay(50);
   14409 }
   14410 
   14411 static void
   14412 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14413 {
   14414 
   14415 	*eecd &= ~EECD_SK;
   14416 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14417 	CSR_WRITE_FLUSH(sc);
   14418 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14419 		delay(1);
   14420 	else
   14421 		delay(50);
   14422 }
   14423 
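/*
 * wm_put_eecd:
 *
 *	Stop the NVM and release direct EEPROM access.
 */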
   14424 static void
   14425 wm_put_eecd(struct wm_softc *sc)
   14426 {
   14427 	uint32_t reg;
   14428 
   14429 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14430 		device_xname(sc->sc_dev), __func__));
   14431 
   14432 	/* Stop nvm */
   14433 	reg = CSR_READ(sc, WMREG_EECD);
   14434 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14435 		/* Pull CS high */
   14436 		reg |= EECD_CS;
   14437 		wm_nvm_eec_clock_lower(sc, &reg);
   14438 	} else {
   14439 		/* CS on Microwire is active-high */
   14440 		reg &= ~(EECD_CS | EECD_DI);
   14441 		CSR_WRITE(sc, WMREG_EECD, reg);
   14442 		wm_nvm_eec_clock_raise(sc, &reg);
   14443 		wm_nvm_eec_clock_lower(sc, &reg);
   14444 	}
   14445 
   14446 	reg = CSR_READ(sc, WMREG_EECD);
   14447 	reg &= ~EECD_EE_REQ;
   14448 	CSR_WRITE(sc, WMREG_EECD, reg);
   14449 
   14450 	return;
   14451 }
   14452 
   14453 /*
   14454  * Get hardware semaphore.
   14455  * Same as e1000_get_hw_semaphore_generic()
   14456  */
   14457 static int
   14458 wm_get_swsm_semaphore(struct wm_softc *sc)
   14459 {
   14460 	int32_t timeout;
   14461 	uint32_t swsm;
   14462 
   14463 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14464 		device_xname(sc->sc_dev), __func__));
   14465 	KASSERT(sc->sc_nvm_wordsize > 0);
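          	/*
          	 * The poll count below scales with the NVM size (one 50us poll
          	 * per NVM word, plus one), hence the KASSERT above.
          	 */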
   14466 
   14467 retry:
   14468 	/* Get the SW semaphore. */
   14469 	timeout = sc->sc_nvm_wordsize + 1;
   14470 	while (timeout) {
   14471 		swsm = CSR_READ(sc, WMREG_SWSM);
   14472 
   14473 		if ((swsm & SWSM_SMBI) == 0)
   14474 			break;
   14475 
   14476 		delay(50);
   14477 		timeout--;
   14478 	}
   14479 
   14480 	if (timeout == 0) {
   14481 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14482 			/*
   14483 			 * In rare circumstances, the SW semaphore may already
   14484 			 * be held unintentionally. Clear the semaphore once
   14485 			 * before giving up.
   14486 			 */
   14487 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14488 			wm_put_swsm_semaphore(sc);
   14489 			goto retry;
   14490 		}
   14491 		aprint_error_dev(sc->sc_dev,
   14492 		    "could not acquire SWSM SMBI\n");
   14493 		return 1;
   14494 	}
   14495 
   14496 	/* Get the FW semaphore. */
   14497 	timeout = sc->sc_nvm_wordsize + 1;
   14498 	while (timeout) {
   14499 		swsm = CSR_READ(sc, WMREG_SWSM);
   14500 		swsm |= SWSM_SWESMBI;
   14501 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14502 		/* If we managed to set the bit we got the semaphore. */
   14503 		swsm = CSR_READ(sc, WMREG_SWSM);
   14504 		if (swsm & SWSM_SWESMBI)
   14505 			break;
   14506 
   14507 		delay(50);
   14508 		timeout--;
   14509 	}
   14510 
   14511 	if (timeout == 0) {
   14512 		aprint_error_dev(sc->sc_dev,
   14513 		    "could not acquire SWSM SWESMBI\n");
   14514 		/* Release semaphores */
   14515 		wm_put_swsm_semaphore(sc);
   14516 		return 1;
   14517 	}
   14518 	return 0;
   14519 }
   14520 
   14521 /*
   14522  * Put hardware semaphore.
   14523  * Same as e1000_put_hw_semaphore_generic()
   14524  */
   14525 static void
   14526 wm_put_swsm_semaphore(struct wm_softc *sc)
   14527 {
   14528 	uint32_t swsm;
   14529 
   14530 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14531 		device_xname(sc->sc_dev), __func__));
   14532 
   14533 	swsm = CSR_READ(sc, WMREG_SWSM);
   14534 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14535 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14536 }
   14537 
   14538 /*
   14539  * Get SW/FW semaphore.
   14540  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14541  */
   14542 static int
   14543 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14544 {
   14545 	uint32_t swfw_sync;
   14546 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14547 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14548 	int timeout;
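          	/*
          	 * SW_FW_SYNC carries a software bit and a firmware bit per
          	 * resource; "mask" is shifted into both fields so the lock is
          	 * taken only when neither the driver nor the firmware holds it.
          	 */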
   14549 
   14550 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14551 		device_xname(sc->sc_dev), __func__));
   14552 
   14553 	if (sc->sc_type == WM_T_80003)
   14554 		timeout = 50;
   14555 	else
   14556 		timeout = 200;
   14557 
   14558 	while (timeout) {
   14559 		if (wm_get_swsm_semaphore(sc)) {
   14560 			aprint_error_dev(sc->sc_dev,
   14561 			    "%s: failed to get semaphore\n",
   14562 			    __func__);
   14563 			return 1;
   14564 		}
   14565 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14566 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14567 			swfw_sync |= swmask;
   14568 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14569 			wm_put_swsm_semaphore(sc);
   14570 			return 0;
   14571 		}
   14572 		wm_put_swsm_semaphore(sc);
   14573 		delay(5000);
   14574 		timeout--;
   14575 	}
   14576 	device_printf(sc->sc_dev,
   14577 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14578 	    mask, swfw_sync);
   14579 	return 1;
   14580 }
   14581 
   14582 static void
   14583 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14584 {
   14585 	uint32_t swfw_sync;
   14586 
   14587 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14588 		device_xname(sc->sc_dev), __func__));
   14589 
   14590 	while (wm_get_swsm_semaphore(sc) != 0)
   14591 		continue;
   14592 
   14593 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14594 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14595 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14596 
   14597 	wm_put_swsm_semaphore(sc);
   14598 }
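
          /*
           * Typical SWFW usage (sketch only; SWFW_EEP_SM is the NVM resource
           * bit used elsewhere in this file):
           *
           *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) != 0)
           *		return error;
           *	... access the shared NVM/PHY resource ...
           *	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
           */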
   14599 
   14600 static int
   14601 wm_get_nvm_80003(struct wm_softc *sc)
   14602 {
   14603 	int rv;
   14604 
   14605 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14606 		device_xname(sc->sc_dev), __func__));
   14607 
   14608 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14609 		aprint_error_dev(sc->sc_dev,
   14610 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14611 		return rv;
   14612 	}
   14613 
   14614 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14615 	    && (rv = wm_get_eecd(sc)) != 0) {
   14616 		aprint_error_dev(sc->sc_dev,
   14617 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14618 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14619 		return rv;
   14620 	}
   14621 
   14622 	return 0;
   14623 }
   14624 
   14625 static void
   14626 wm_put_nvm_80003(struct wm_softc *sc)
   14627 {
   14628 
   14629 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14630 		device_xname(sc->sc_dev), __func__));
   14631 
   14632 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14633 		wm_put_eecd(sc);
   14634 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14635 }
   14636 
   14637 static int
   14638 wm_get_nvm_82571(struct wm_softc *sc)
   14639 {
   14640 	int rv;
   14641 
   14642 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14643 		device_xname(sc->sc_dev), __func__));
   14644 
   14645 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14646 		return rv;
   14647 
   14648 	switch (sc->sc_type) {
   14649 	case WM_T_82573:
   14650 		break;
   14651 	default:
   14652 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14653 			rv = wm_get_eecd(sc);
   14654 		break;
   14655 	}
   14656 
   14657 	if (rv != 0) {
   14658 		aprint_error_dev(sc->sc_dev,
   14659 		    "%s: failed to get semaphore\n",
   14660 		    __func__);
   14661 		wm_put_swsm_semaphore(sc);
   14662 	}
   14663 
   14664 	return rv;
   14665 }
   14666 
   14667 static void
   14668 wm_put_nvm_82571(struct wm_softc *sc)
   14669 {
   14670 
   14671 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14672 		device_xname(sc->sc_dev), __func__));
   14673 
   14674 	switch (sc->sc_type) {
   14675 	case WM_T_82573:
   14676 		break;
   14677 	default:
   14678 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14679 			wm_put_eecd(sc);
   14680 		break;
   14681 	}
   14682 
   14683 	wm_put_swsm_semaphore(sc);
   14684 }
   14685 
   14686 static int
   14687 wm_get_phy_82575(struct wm_softc *sc)
   14688 {
   14689 
   14690 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14691 		device_xname(sc->sc_dev), __func__));
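          	/* Each LAN function has its own PHY semaphore bit; use ours. */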
   14692 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14693 }
   14694 
   14695 static void
   14696 wm_put_phy_82575(struct wm_softc *sc)
   14697 {
   14698 
   14699 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14700 		device_xname(sc->sc_dev), __func__));
   14701 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14702 }
   14703 
   14704 static int
   14705 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14706 {
   14707 	uint32_t ext_ctrl;
    14708 	int timeout;
   14709 
   14710 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14711 		device_xname(sc->sc_dev), __func__));
   14712 
   14713 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14714 	for (timeout = 0; timeout < 200; timeout++) {
   14715 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14716 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14717 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14718 
   14719 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14720 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14721 			return 0;
   14722 		delay(5000);
   14723 	}
   14724 	device_printf(sc->sc_dev,
   14725 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14726 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14727 	return 1;
   14728 }
   14729 
   14730 static void
   14731 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14732 {
   14733 	uint32_t ext_ctrl;
   14734 
   14735 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14736 		device_xname(sc->sc_dev), __func__));
   14737 
   14738 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14739 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14740 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14741 
   14742 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14743 }
   14744 
   14745 static int
   14746 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14747 {
   14748 	uint32_t ext_ctrl;
   14749 	int timeout;
   14750 
   14751 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14752 		device_xname(sc->sc_dev), __func__));
   14753 	mutex_enter(sc->sc_ich_phymtx);
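          	/*
          	 * Two phases: wait for any current owner to drop the
          	 * MDIO_SW_OWNERSHIP bit, then set it and read it back to
          	 * confirm that the hardware accepted our claim.
          	 */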
   14754 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14755 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14756 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14757 			break;
   14758 		delay(1000);
   14759 	}
   14760 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14761 		device_printf(sc->sc_dev,
   14762 		    "SW has already locked the resource\n");
   14763 		goto out;
   14764 	}
   14765 
   14766 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14767 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14768 	for (timeout = 0; timeout < 1000; timeout++) {
   14769 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14770 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14771 			break;
   14772 		delay(1000);
   14773 	}
   14774 	if (timeout >= 1000) {
   14775 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14776 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14777 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14778 		goto out;
   14779 	}
   14780 	return 0;
   14781 
   14782 out:
   14783 	mutex_exit(sc->sc_ich_phymtx);
   14784 	return 1;
   14785 }
   14786 
   14787 static void
   14788 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14789 {
   14790 	uint32_t ext_ctrl;
   14791 
   14792 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14793 		device_xname(sc->sc_dev), __func__));
   14794 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14795 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14796 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14797 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14798 	} else {
   14799 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14800 	}
   14801 
   14802 	mutex_exit(sc->sc_ich_phymtx);
   14803 }
   14804 
   14805 static int
   14806 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14807 {
   14808 
   14809 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14810 		device_xname(sc->sc_dev), __func__));
   14811 	mutex_enter(sc->sc_ich_nvmmtx);
   14812 
   14813 	return 0;
   14814 }
   14815 
   14816 static void
   14817 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14818 {
   14819 
   14820 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14821 		device_xname(sc->sc_dev), __func__));
   14822 	mutex_exit(sc->sc_ich_nvmmtx);
   14823 }
   14824 
   14825 static int
   14826 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14827 {
   14828 	int i = 0;
   14829 	uint32_t reg;
   14830 
   14831 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14832 		device_xname(sc->sc_dev), __func__));
   14833 
   14834 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14835 	do {
   14836 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14837 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14838 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14839 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14840 			break;
   14841 		delay(2*1000);
   14842 		i++;
   14843 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14844 
   14845 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14846 		wm_put_hw_semaphore_82573(sc);
   14847 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14848 		    device_xname(sc->sc_dev));
   14849 		return -1;
   14850 	}
   14851 
   14852 	return 0;
   14853 }
   14854 
   14855 static void
   14856 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14857 {
   14858 	uint32_t reg;
   14859 
   14860 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14861 		device_xname(sc->sc_dev), __func__));
   14862 
   14863 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14864 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14865 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14866 }
   14867 
   14868 /*
   14869  * Management mode and power management related subroutines.
   14870  * BMC, AMT, suspend/resume and EEE.
   14871  */
   14872 
   14873 #ifdef WM_WOL
   14874 static int
   14875 wm_check_mng_mode(struct wm_softc *sc)
   14876 {
   14877 	int rv;
   14878 
   14879 	switch (sc->sc_type) {
   14880 	case WM_T_ICH8:
   14881 	case WM_T_ICH9:
   14882 	case WM_T_ICH10:
   14883 	case WM_T_PCH:
   14884 	case WM_T_PCH2:
   14885 	case WM_T_PCH_LPT:
   14886 	case WM_T_PCH_SPT:
   14887 	case WM_T_PCH_CNP:
   14888 		rv = wm_check_mng_mode_ich8lan(sc);
   14889 		break;
   14890 	case WM_T_82574:
   14891 	case WM_T_82583:
   14892 		rv = wm_check_mng_mode_82574(sc);
   14893 		break;
   14894 	case WM_T_82571:
   14895 	case WM_T_82572:
   14896 	case WM_T_82573:
   14897 	case WM_T_80003:
   14898 		rv = wm_check_mng_mode_generic(sc);
   14899 		break;
   14900 	default:
    14901 		/* Nothing to do */
   14902 		rv = 0;
   14903 		break;
   14904 	}
   14905 
   14906 	return rv;
   14907 }
   14908 
   14909 static int
   14910 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14911 {
   14912 	uint32_t fwsm;
   14913 
   14914 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14915 
   14916 	if (((fwsm & FWSM_FW_VALID) != 0)
   14917 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14918 		return 1;
   14919 
   14920 	return 0;
   14921 }
   14922 
   14923 static int
   14924 wm_check_mng_mode_82574(struct wm_softc *sc)
   14925 {
   14926 	uint16_t data;
   14927 
   14928 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14929 
   14930 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14931 		return 1;
   14932 
   14933 	return 0;
   14934 }
   14935 
   14936 static int
   14937 wm_check_mng_mode_generic(struct wm_softc *sc)
   14938 {
   14939 	uint32_t fwsm;
   14940 
   14941 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14942 
   14943 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14944 		return 1;
   14945 
   14946 	return 0;
   14947 }
   14948 #endif /* WM_WOL */
   14949 
   14950 static int
   14951 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14952 {
   14953 	uint32_t manc, fwsm, factps;
   14954 
   14955 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14956 		return 0;
   14957 
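          	/*
          	 * Pass-through also requires the management engine to be
          	 * receiving TCO packets (MANC_RECV_TCO_EN); past that, the
          	 * check is chip specific: FWSM mode bits, NVM CFG2 on
          	 * 82574/82583, or the MANC SMBus/ASF bits.
          	 */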
   14958 	manc = CSR_READ(sc, WMREG_MANC);
   14959 
   14960 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14961 		device_xname(sc->sc_dev), manc));
   14962 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14963 		return 0;
   14964 
   14965 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14966 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14967 		factps = CSR_READ(sc, WMREG_FACTPS);
   14968 		if (((factps & FACTPS_MNGCG) == 0)
   14969 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14970 			return 1;
   14971 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14972 		uint16_t data;
   14973 
   14974 		factps = CSR_READ(sc, WMREG_FACTPS);
   14975 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14976 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14977 			device_xname(sc->sc_dev), factps, data));
   14978 		if (((factps & FACTPS_MNGCG) == 0)
   14979 		    && ((data & NVM_CFG2_MNGM_MASK)
   14980 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14981 			return 1;
   14982 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14983 	    && ((manc & MANC_ASF_EN) == 0))
   14984 		return 1;
   14985 
   14986 	return 0;
   14987 }
   14988 
   14989 static bool
   14990 wm_phy_resetisblocked(struct wm_softc *sc)
   14991 {
   14992 	bool blocked = false;
   14993 	uint32_t reg;
   14994 	int i = 0;
   14995 
   14996 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   14997 		device_xname(sc->sc_dev), __func__));
   14998 
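          	/*
          	 * On ICH/PCH, firmware signals that a PHY reset is allowed via
          	 * FWSM_RSPCIPHY; poll it for up to ~300ms (30 x 10ms) below.
          	 * Older chips expose an equivalent block bit in MANC instead.
          	 */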
   14999 	switch (sc->sc_type) {
   15000 	case WM_T_ICH8:
   15001 	case WM_T_ICH9:
   15002 	case WM_T_ICH10:
   15003 	case WM_T_PCH:
   15004 	case WM_T_PCH2:
   15005 	case WM_T_PCH_LPT:
   15006 	case WM_T_PCH_SPT:
   15007 	case WM_T_PCH_CNP:
   15008 		do {
   15009 			reg = CSR_READ(sc, WMREG_FWSM);
   15010 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15011 				blocked = true;
   15012 				delay(10*1000);
   15013 				continue;
   15014 			}
   15015 			blocked = false;
   15016 		} while (blocked && (i++ < 30));
    15017 		return blocked;
   15019 	case WM_T_82571:
   15020 	case WM_T_82572:
   15021 	case WM_T_82573:
   15022 	case WM_T_82574:
   15023 	case WM_T_82583:
   15024 	case WM_T_80003:
   15025 		reg = CSR_READ(sc, WMREG_MANC);
   15026 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   15027 			return true;
   15028 		else
   15029 			return false;
   15031 	default:
   15032 		/* No problem */
   15033 		break;
   15034 	}
   15035 
   15036 	return false;
   15037 }
   15038 
   15039 static void
   15040 wm_get_hw_control(struct wm_softc *sc)
   15041 {
   15042 	uint32_t reg;
   15043 
   15044 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15045 		device_xname(sc->sc_dev), __func__));
   15046 
   15047 	if (sc->sc_type == WM_T_82573) {
   15048 		reg = CSR_READ(sc, WMREG_SWSM);
   15049 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15050 	} else if (sc->sc_type >= WM_T_82571) {
   15051 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15052 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15053 	}
   15054 }
   15055 
   15056 static void
   15057 wm_release_hw_control(struct wm_softc *sc)
   15058 {
   15059 	uint32_t reg;
   15060 
   15061 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15062 		device_xname(sc->sc_dev), __func__));
   15063 
   15064 	if (sc->sc_type == WM_T_82573) {
   15065 		reg = CSR_READ(sc, WMREG_SWSM);
   15066 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15067 	} else if (sc->sc_type >= WM_T_82571) {
   15068 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15069 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15070 	}
   15071 }
   15072 
   15073 static void
   15074 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15075 {
   15076 	uint32_t reg;
   15077 
   15078 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15079 		device_xname(sc->sc_dev), __func__));
   15080 
   15081 	if (sc->sc_type < WM_T_PCH2)
   15082 		return;
   15083 
   15084 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15085 
   15086 	if (gate)
   15087 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15088 	else
   15089 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15090 
   15091 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15092 }
   15093 
   15094 static int
   15095 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15096 {
   15097 	uint32_t fwsm, reg;
   15098 	int rv = 0;
   15099 
   15100 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15101 		device_xname(sc->sc_dev), __func__));
   15102 
   15103 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15104 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15105 
   15106 	/* Disable ULP */
   15107 	wm_ulp_disable(sc);
   15108 
   15109 	/* Acquire PHY semaphore */
   15110 	rv = sc->phy.acquire(sc);
   15111 	if (rv != 0) {
   15112 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
    15113 			device_xname(sc->sc_dev), __func__));
   15114 		return -1;
   15115 	}
   15116 
   15117 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15118 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15119 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15120 	 */
   15121 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15122 	switch (sc->sc_type) {
   15123 	case WM_T_PCH_LPT:
   15124 	case WM_T_PCH_SPT:
   15125 	case WM_T_PCH_CNP:
   15126 		if (wm_phy_is_accessible_pchlan(sc))
   15127 			break;
   15128 
   15129 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15130 		 * forcing MAC to SMBus mode first.
   15131 		 */
   15132 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15133 		reg |= CTRL_EXT_FORCE_SMBUS;
   15134 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15135 #if 0
   15136 		/* XXX Isn't this required??? */
   15137 		CSR_WRITE_FLUSH(sc);
   15138 #endif
   15139 		/* Wait 50 milliseconds for MAC to finish any retries
   15140 		 * that it might be trying to perform from previous
   15141 		 * attempts to acknowledge any phy read requests.
   15142 		 */
   15143 		delay(50 * 1000);
   15144 		/* FALLTHROUGH */
   15145 	case WM_T_PCH2:
   15146 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15147 			break;
   15148 		/* FALLTHROUGH */
   15149 	case WM_T_PCH:
   15150 		if (sc->sc_type == WM_T_PCH)
   15151 			if ((fwsm & FWSM_FW_VALID) != 0)
   15152 				break;
   15153 
   15154 		if (wm_phy_resetisblocked(sc) == true) {
   15155 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15156 			break;
   15157 		}
   15158 
   15159 		/* Toggle LANPHYPC Value bit */
   15160 		wm_toggle_lanphypc_pch_lpt(sc);
   15161 
   15162 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15163 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15164 				break;
   15165 
   15166 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15167 			 * so ensure that the MAC is also out of SMBus mode
   15168 			 */
   15169 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15170 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15171 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15172 
   15173 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15174 				break;
   15175 			rv = -1;
   15176 		}
   15177 		break;
   15178 	default:
   15179 		break;
   15180 	}
   15181 
   15182 	/* Release semaphore */
   15183 	sc->phy.release(sc);
   15184 
   15185 	if (rv == 0) {
   15186 		/* Check to see if able to reset PHY.  Print error if not */
   15187 		if (wm_phy_resetisblocked(sc)) {
   15188 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15189 			goto out;
   15190 		}
   15191 
   15192 		/* Reset the PHY before any access to it.  Doing so, ensures
   15193 		 * that the PHY is in a known good state before we read/write
   15194 		 * PHY registers.  The generic reset is sufficient here,
   15195 		 * because we haven't determined the PHY type yet.
   15196 		 */
   15197 		if (wm_reset_phy(sc) != 0)
   15198 			goto out;
   15199 
   15200 		/* On a successful reset, possibly need to wait for the PHY
   15201 		 * to quiesce to an accessible state before returning control
   15202 		 * to the calling function.  If the PHY does not quiesce, then
   15203 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    15204 		 * the PHY is in.
   15205 		 */
   15206 		if (wm_phy_resetisblocked(sc))
   15207 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15208 	}
   15209 
   15210 out:
   15211 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15212 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15213 		delay(10*1000);
   15214 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15215 	}
   15216 
   15217 	return 0;
   15218 }
   15219 
   15220 static void
   15221 wm_init_manageability(struct wm_softc *sc)
   15222 {
   15223 
   15224 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15225 		device_xname(sc->sc_dev), __func__));
   15226 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15227 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15228 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15229 
   15230 		/* Disable hardware interception of ARP */
   15231 		manc &= ~MANC_ARP_EN;
   15232 
   15233 		/* Enable receiving management packets to the host */
   15234 		if (sc->sc_type >= WM_T_82571) {
   15235 			manc |= MANC_EN_MNG2HOST;
   15236 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15237 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15238 		}
   15239 
   15240 		CSR_WRITE(sc, WMREG_MANC, manc);
   15241 	}
   15242 }
   15243 
   15244 static void
   15245 wm_release_manageability(struct wm_softc *sc)
   15246 {
   15247 
   15248 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15249 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15250 
   15251 		manc |= MANC_ARP_EN;
   15252 		if (sc->sc_type >= WM_T_82571)
   15253 			manc &= ~MANC_EN_MNG2HOST;
   15254 
   15255 		CSR_WRITE(sc, WMREG_MANC, manc);
   15256 	}
   15257 }
   15258 
   15259 static void
   15260 wm_get_wakeup(struct wm_softc *sc)
   15261 {
   15262 
   15263 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15264 	switch (sc->sc_type) {
   15265 	case WM_T_82573:
   15266 	case WM_T_82583:
   15267 		sc->sc_flags |= WM_F_HAS_AMT;
   15268 		/* FALLTHROUGH */
   15269 	case WM_T_80003:
   15270 	case WM_T_82575:
   15271 	case WM_T_82576:
   15272 	case WM_T_82580:
   15273 	case WM_T_I350:
   15274 	case WM_T_I354:
   15275 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15276 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15277 		/* FALLTHROUGH */
   15278 	case WM_T_82541:
   15279 	case WM_T_82541_2:
   15280 	case WM_T_82547:
   15281 	case WM_T_82547_2:
   15282 	case WM_T_82571:
   15283 	case WM_T_82572:
   15284 	case WM_T_82574:
   15285 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15286 		break;
   15287 	case WM_T_ICH8:
   15288 	case WM_T_ICH9:
   15289 	case WM_T_ICH10:
   15290 	case WM_T_PCH:
   15291 	case WM_T_PCH2:
   15292 	case WM_T_PCH_LPT:
   15293 	case WM_T_PCH_SPT:
   15294 	case WM_T_PCH_CNP:
   15295 		sc->sc_flags |= WM_F_HAS_AMT;
   15296 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15297 		break;
   15298 	default:
   15299 		break;
   15300 	}
   15301 
   15302 	/* 1: HAS_MANAGE */
   15303 	if (wm_enable_mng_pass_thru(sc) != 0)
   15304 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15305 
    15306 	/*
    15307 	 * Note that the WOL flags are set after the EEPROM-related reset
    15308 	 * has been done.
    15309 	 */
   15310 }
   15311 
   15312 /*
   15313  * Unconfigure Ultra Low Power mode.
   15314  * Only for I217 and newer (see below).
   15315  */
   15316 static int
   15317 wm_ulp_disable(struct wm_softc *sc)
   15318 {
   15319 	uint32_t reg;
   15320 	uint16_t phyreg;
   15321 	int i = 0, rv = 0;
   15322 
   15323 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15324 		device_xname(sc->sc_dev), __func__));
   15325 	/* Exclude old devices */
   15326 	if ((sc->sc_type < WM_T_PCH_LPT)
   15327 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15328 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15329 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15330 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15331 		return 0;
   15332 
   15333 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15334 		/* Request ME un-configure ULP mode in the PHY */
   15335 		reg = CSR_READ(sc, WMREG_H2ME);
   15336 		reg &= ~H2ME_ULP;
   15337 		reg |= H2ME_ENFORCE_SETTINGS;
   15338 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15339 
   15340 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15341 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15342 			if (i++ == 30) {
   15343 				device_printf(sc->sc_dev, "%s timed out\n",
   15344 				    __func__);
   15345 				return -1;
   15346 			}
   15347 			delay(10 * 1000);
   15348 		}
   15349 		reg = CSR_READ(sc, WMREG_H2ME);
   15350 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15351 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15352 
   15353 		return 0;
   15354 	}
   15355 
   15356 	/* Acquire semaphore */
   15357 	rv = sc->phy.acquire(sc);
   15358 	if (rv != 0) {
   15359 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
    15360 			device_xname(sc->sc_dev), __func__));
   15361 		return -1;
   15362 	}
   15363 
   15364 	/* Toggle LANPHYPC */
   15365 	wm_toggle_lanphypc_pch_lpt(sc);
   15366 
   15367 	/* Unforce SMBus mode in PHY */
   15368 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15369 	if (rv != 0) {
   15370 		uint32_t reg2;
   15371 
   15372 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15373 			__func__);
   15374 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15375 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15376 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15377 		delay(50 * 1000);
   15378 
   15379 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15380 		    &phyreg);
   15381 		if (rv != 0)
   15382 			goto release;
   15383 	}
   15384 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15385 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15386 
   15387 	/* Unforce SMBus mode in MAC */
   15388 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15389 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15390 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15391 
   15392 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15393 	if (rv != 0)
   15394 		goto release;
   15395 	phyreg |= HV_PM_CTRL_K1_ENA;
   15396 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15397 
   15398 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15399 		&phyreg);
   15400 	if (rv != 0)
   15401 		goto release;
   15402 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15403 	    | I218_ULP_CONFIG1_STICKY_ULP
   15404 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15405 	    | I218_ULP_CONFIG1_WOL_HOST
   15406 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15407 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15408 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15409 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15410 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15411 	phyreg |= I218_ULP_CONFIG1_START;
   15412 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15413 
   15414 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15415 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15416 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15417 
   15418 release:
   15419 	/* Release semaphore */
   15420 	sc->phy.release(sc);
   15421 	wm_gmii_reset(sc);
   15422 	delay(50 * 1000);
   15423 
   15424 	return rv;
   15425 }
   15426 
   15427 /* WOL in the newer chipset interfaces (pchlan) */
   15428 static int
   15429 wm_enable_phy_wakeup(struct wm_softc *sc)
   15430 {
   15431 	device_t dev = sc->sc_dev;
   15432 	uint32_t mreg, moff;
   15433 	uint16_t wuce, wuc, wufc, preg;
   15434 	int i, rv;
   15435 
   15436 	KASSERT(sc->sc_type >= WM_T_PCH);
   15437 
   15438 	/* Copy MAC RARs to PHY RARs */
   15439 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15440 
   15441 	/* Activate PHY wakeup */
   15442 	rv = sc->phy.acquire(sc);
   15443 	if (rv != 0) {
   15444 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15445 		    __func__);
   15446 		return rv;
   15447 	}
   15448 
   15449 	/*
   15450 	 * Enable access to PHY wakeup registers.
   15451 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15452 	 */
   15453 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15454 	if (rv != 0) {
   15455 		device_printf(dev,
   15456 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15457 		goto release;
   15458 	}
   15459 
   15460 	/* Copy MAC MTA to PHY MTA */
   15461 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15462 		uint16_t lo, hi;
   15463 
   15464 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15465 		lo = (uint16_t)(mreg & 0xffff);
   15466 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15467 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15468 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15469 	}
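          	/*
          	 * (Each 32-bit MTA entry above is written as two 16-bit halves
          	 * because the BM wakeup registers are 16 bits wide.)
          	 */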
   15470 
   15471 	/* Configure PHY Rx Control register */
   15472 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15473 	mreg = CSR_READ(sc, WMREG_RCTL);
   15474 	if (mreg & RCTL_UPE)
   15475 		preg |= BM_RCTL_UPE;
   15476 	if (mreg & RCTL_MPE)
   15477 		preg |= BM_RCTL_MPE;
   15478 	preg &= ~(BM_RCTL_MO_MASK);
   15479 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15480 	if (moff != 0)
   15481 		preg |= moff << BM_RCTL_MO_SHIFT;
   15482 	if (mreg & RCTL_BAM)
   15483 		preg |= BM_RCTL_BAM;
   15484 	if (mreg & RCTL_PMCF)
   15485 		preg |= BM_RCTL_PMCF;
   15486 	mreg = CSR_READ(sc, WMREG_CTRL);
   15487 	if (mreg & CTRL_RFCE)
   15488 		preg |= BM_RCTL_RFCE;
   15489 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15490 
   15491 	wuc = WUC_APME | WUC_PME_EN;
   15492 	wufc = WUFC_MAG;
   15493 	/* Enable PHY wakeup in MAC register */
   15494 	CSR_WRITE(sc, WMREG_WUC,
   15495 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15496 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15497 
   15498 	/* Configure and enable PHY wakeup in PHY registers */
   15499 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15500 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15501 
   15502 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15503 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15504 
   15505 release:
   15506 	sc->phy.release(sc);
   15507 
    15508 	return rv;
   15509 }
   15510 
   15511 /* Power down workaround on D3 */
   15512 static void
   15513 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15514 {
   15515 	uint32_t reg;
   15516 	uint16_t phyreg;
   15517 	int i;
   15518 
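          	/*
          	 * Write VR shutdown, read it back to verify it stuck, and if
          	 * it did not, issue a PHY reset and retry once (hence the
          	 * two-pass loop).
          	 */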
   15519 	for (i = 0; i < 2; i++) {
   15520 		/* Disable link */
   15521 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15522 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15523 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15524 
   15525 		/*
   15526 		 * Call gig speed drop workaround on Gig disable before
   15527 		 * accessing any PHY registers
   15528 		 */
   15529 		if (sc->sc_type == WM_T_ICH8)
   15530 			wm_gig_downshift_workaround_ich8lan(sc);
   15531 
   15532 		/* Write VR power-down enable */
   15533 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15534 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15535 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15536 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15537 
   15538 		/* Read it back and test */
   15539 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15540 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15541 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15542 			break;
   15543 
   15544 		/* Issue PHY reset and repeat at most one more time */
   15545 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15546 	}
   15547 }
   15548 
   15549 /*
   15550  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15551  *  @sc: pointer to the HW structure
   15552  *
   15553  *  During S0 to Sx transition, it is possible the link remains at gig
   15554  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15555  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15556  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15557  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15558  *  needs to be written.
   15559  *  Parts that support (and are linked to a partner which support) EEE in
   15560  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15561  *  than 10Mbps w/o EEE.
   15562  */
   15563 static void
   15564 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15565 {
   15566 	device_t dev = sc->sc_dev;
   15567 	struct ethercom *ec = &sc->sc_ethercom;
   15568 	uint32_t phy_ctrl;
   15569 	int rv;
   15570 
   15571 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15572 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15573 
   15574 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15575 
   15576 	if (sc->sc_phytype == WMPHY_I217) {
   15577 		uint16_t devid = sc->sc_pcidevid;
   15578 
   15579 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15580 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15581 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15582 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15583 		    (sc->sc_type >= WM_T_PCH_SPT))
   15584 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15585 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15586 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15587 
   15588 		if (sc->phy.acquire(sc) != 0)
   15589 			goto out;
   15590 
   15591 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15592 			uint16_t eee_advert;
   15593 
   15594 			rv = wm_read_emi_reg_locked(dev,
   15595 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15596 			if (rv)
   15597 				goto release;
   15598 
   15599 			/*
   15600 			 * Disable LPLU if both link partners support 100BaseT
   15601 			 * EEE and 100Full is advertised on both ends of the
   15602 			 * link, and enable Auto Enable LPI since there will
   15603 			 * be no driver to enable LPI while in Sx.
   15604 			 */
   15605 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15606 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15607 				uint16_t anar, phy_reg;
   15608 
   15609 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15610 				    &anar);
   15611 				if (anar & ANAR_TX_FD) {
   15612 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15613 					    PHY_CTRL_NOND0A_LPLU);
   15614 
   15615 					/* Set Auto Enable LPI after link up */
   15616 					sc->phy.readreg_locked(dev, 2,
   15617 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15618 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15619 					sc->phy.writereg_locked(dev, 2,
   15620 					    I217_LPI_GPIO_CTRL, phy_reg);
   15621 				}
   15622 			}
   15623 		}
   15624 
   15625 		/*
   15626 		 * For i217 Intel Rapid Start Technology support,
   15627 		 * when the system is going into Sx and no manageability engine
   15628 		 * is present, the driver must configure proxy to reset only on
   15629 		 * power good.	LPI (Low Power Idle) state must also reset only
   15630 		 * on power good, as well as the MTA (Multicast table array).
   15631 		 * The SMBus release must also be disabled on LCD reset.
   15632 		 */
   15633 
   15634 		/*
   15635 		 * Enable MTA to reset for Intel Rapid Start Technology
   15636 		 * Support
   15637 		 */
   15638 
   15639 release:
   15640 		sc->phy.release(sc);
   15641 	}
   15642 out:
   15643 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15644 
   15645 	if (sc->sc_type == WM_T_ICH8)
   15646 		wm_gig_downshift_workaround_ich8lan(sc);
   15647 
   15648 	if (sc->sc_type >= WM_T_PCH) {
   15649 		wm_oem_bits_config_ich8lan(sc, false);
   15650 
   15651 		/* Reset PHY to activate OEM bits on 82577/8 */
   15652 		if (sc->sc_type == WM_T_PCH)
   15653 			wm_reset_phy(sc);
   15654 
   15655 		if (sc->phy.acquire(sc) != 0)
   15656 			return;
   15657 		wm_write_smbus_addr(sc);
   15658 		sc->phy.release(sc);
   15659 	}
   15660 }
   15661 
   15662 /*
   15663  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15664  *  @sc: pointer to the HW structure
   15665  *
   15666  *  During Sx to S0 transitions on non-managed devices or managed devices
   15667  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15668  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15669  *  the PHY.
   15670  *  On i217, setup Intel Rapid Start Technology.
   15671  */
   15672 static int
   15673 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15674 {
   15675 	device_t dev = sc->sc_dev;
   15676 	int rv;
   15677 
   15678 	if (sc->sc_type < WM_T_PCH2)
   15679 		return 0;
   15680 
   15681 	rv = wm_init_phy_workarounds_pchlan(sc);
   15682 	if (rv != 0)
   15683 		return -1;
   15684 
   15685 	/* For i217 Intel Rapid Start Technology support when the system
    15686 	 * is transitioning from Sx and no manageability engine is present,
   15687 	 * configure SMBus to restore on reset, disable proxy, and enable
   15688 	 * the reset on MTA (Multicast table array).
   15689 	 */
   15690 	if (sc->sc_phytype == WMPHY_I217) {
   15691 		uint16_t phy_reg;
   15692 
   15693 		if (sc->phy.acquire(sc) != 0)
   15694 			return -1;
   15695 
   15696 		/* Clear Auto Enable LPI after link up */
   15697 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15698 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15699 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15700 
   15701 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15702 			/* Restore clear on SMB if no manageability engine
   15703 			 * is present
   15704 			 */
   15705 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15706 			    &phy_reg);
   15707 			if (rv != 0)
   15708 				goto release;
   15709 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15710 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15711 
   15712 			/* Disable Proxy */
   15713 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15714 		}
   15715 		/* Enable reset on MTA */
    15716 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15717 		if (rv != 0)
   15718 			goto release;
   15719 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15720 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15721 
   15722 release:
   15723 		sc->phy.release(sc);
   15724 		return rv;
   15725 	}
   15726 
   15727 	return 0;
   15728 }
   15729 
   15730 static void
   15731 wm_enable_wakeup(struct wm_softc *sc)
   15732 {
   15733 	uint32_t reg, pmreg;
   15734 	pcireg_t pmode;
   15735 	int rv = 0;
   15736 
   15737 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15738 		device_xname(sc->sc_dev), __func__));
   15739 
   15740 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15741 	    &pmreg, NULL) == 0)
   15742 		return;
   15743 
   15744 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15745 		goto pme;
   15746 
   15747 	/* Advertise the wakeup capability */
   15748 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15749 	    | CTRL_SWDPIN(3));
   15750 
   15751 	/* Keep the laser running on fiber adapters */
   15752 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15753 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15754 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15755 		reg |= CTRL_EXT_SWDPIN(3);
   15756 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15757 	}
   15758 
   15759 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15760 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15761 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15762 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15763 		wm_suspend_workarounds_ich8lan(sc);
   15764 
   15765 #if 0	/* For the multicast packet */
   15766 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15767 	reg |= WUFC_MC;
   15768 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15769 #endif
   15770 
   15771 	if (sc->sc_type >= WM_T_PCH) {
   15772 		rv = wm_enable_phy_wakeup(sc);
   15773 		if (rv != 0)
   15774 			goto pme;
   15775 	} else {
   15776 		/* Enable wakeup by the MAC */
   15777 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15778 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15779 	}
   15780 
   15781 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15782 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15783 		|| (sc->sc_type == WM_T_PCH2))
   15784 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15785 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15786 
   15787 pme:
   15788 	/* Request PME */
   15789 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15790 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15791 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15792 		/* For WOL */
   15793 		pmode |= PCI_PMCSR_PME_EN;
   15794 	} else {
   15795 		/* Disable WOL */
   15796 		pmode &= ~PCI_PMCSR_PME_EN;
   15797 	}
   15798 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15799 }
   15800 
   15801 /* Disable ASPM L0s and/or L1 for workaround */
   15802 static void
   15803 wm_disable_aspm(struct wm_softc *sc)
   15804 {
   15805 	pcireg_t reg, mask = 0;
    15806 	const char *str = "";
   15807 
    15808 	/*
    15809 	 * Only for PCIe devices which have the PCIe capability in their
    15810 	 * PCI config space.
    15811 	 */
   15812 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15813 		return;
   15814 
   15815 	switch (sc->sc_type) {
   15816 	case WM_T_82571:
   15817 	case WM_T_82572:
   15818 		/*
   15819 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15820 		 * State Power management L1 State (ASPM L1).
   15821 		 */
   15822 		mask = PCIE_LCSR_ASPM_L1;
   15823 		str = "L1 is";
   15824 		break;
   15825 	case WM_T_82573:
   15826 	case WM_T_82574:
   15827 	case WM_T_82583:
   15828 		/*
   15829 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15830 		 *
    15831 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15832 		 * some chipsets.  The 82574 and 82583 documents say that
    15833 		 * disabling L0s on those specific chipsets is sufficient,
    15834 		 * but we follow what the Intel em driver does.
   15835 		 *
   15836 		 * References:
   15837 		 * Errata 8 of the Specification Update of i82573.
   15838 		 * Errata 20 of the Specification Update of i82574.
   15839 		 * Errata 9 of the Specification Update of i82583.
   15840 		 */
   15841 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15842 		str = "L0s and L1 are";
   15843 		break;
   15844 	default:
   15845 		return;
   15846 	}
   15847 
   15848 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15849 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15850 	reg &= ~mask;
   15851 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15852 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15853 
   15854 	/* Print only in wm_attach() */
   15855 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15856 		aprint_verbose_dev(sc->sc_dev,
    15857 		    "ASPM %s disabled to work around the errata.\n", str);
   15858 }
   15859 
   15860 /* LPLU */
   15861 
   15862 static void
   15863 wm_lplu_d0_disable(struct wm_softc *sc)
   15864 {
   15865 	struct mii_data *mii = &sc->sc_mii;
   15866 	uint32_t reg;
   15867 	uint16_t phyval;
   15868 
   15869 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15870 		device_xname(sc->sc_dev), __func__));
   15871 
   15872 	if (sc->sc_phytype == WMPHY_IFE)
   15873 		return;
   15874 
   15875 	switch (sc->sc_type) {
   15876 	case WM_T_82571:
   15877 	case WM_T_82572:
   15878 	case WM_T_82573:
   15879 	case WM_T_82575:
   15880 	case WM_T_82576:
   15881 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15882 		phyval &= ~PMR_D0_LPLU;
   15883 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15884 		break;
   15885 	case WM_T_82580:
   15886 	case WM_T_I350:
   15887 	case WM_T_I210:
   15888 	case WM_T_I211:
   15889 		reg = CSR_READ(sc, WMREG_PHPM);
   15890 		reg &= ~PHPM_D0A_LPLU;
   15891 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15892 		break;
   15893 	case WM_T_82574:
   15894 	case WM_T_82583:
   15895 	case WM_T_ICH8:
   15896 	case WM_T_ICH9:
   15897 	case WM_T_ICH10:
   15898 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15899 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15900 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15901 		CSR_WRITE_FLUSH(sc);
   15902 		break;
   15903 	case WM_T_PCH:
   15904 	case WM_T_PCH2:
   15905 	case WM_T_PCH_LPT:
   15906 	case WM_T_PCH_SPT:
   15907 	case WM_T_PCH_CNP:
   15908 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15909 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15910 		if (wm_phy_resetisblocked(sc) == false)
   15911 			phyval |= HV_OEM_BITS_ANEGNOW;
   15912 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15913 		break;
   15914 	default:
   15915 		break;
   15916 	}
   15917 }
   15918 
   15919 /* EEE */
   15920 
   15921 static int
   15922 wm_set_eee_i350(struct wm_softc *sc)
   15923 {
   15924 	struct ethercom *ec = &sc->sc_ethercom;
   15925 	uint32_t ipcnfg, eeer;
   15926 	uint32_t ipcnfg_mask
   15927 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15928 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15929 
   15930 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15931 
   15932 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15933 	eeer = CSR_READ(sc, WMREG_EEER);
   15934 
   15935 	/* Enable or disable per user setting */
   15936 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15937 		ipcnfg |= ipcnfg_mask;
   15938 		eeer |= eeer_mask;
   15939 	} else {
   15940 		ipcnfg &= ~ipcnfg_mask;
   15941 		eeer &= ~eeer_mask;
   15942 	}
   15943 
   15944 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15945 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15946 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15947 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15948 
   15949 	return 0;
   15950 }
   15951 
   15952 static int
   15953 wm_set_eee_pchlan(struct wm_softc *sc)
   15954 {
   15955 	device_t dev = sc->sc_dev;
   15956 	struct ethercom *ec = &sc->sc_ethercom;
   15957 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15958 	int rv = 0;
   15959 
   15960 	switch (sc->sc_phytype) {
   15961 	case WMPHY_82579:
   15962 		lpa = I82579_EEE_LP_ABILITY;
   15963 		pcs_status = I82579_EEE_PCS_STATUS;
   15964 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15965 		break;
   15966 	case WMPHY_I217:
   15967 		lpa = I217_EEE_LP_ABILITY;
   15968 		pcs_status = I217_EEE_PCS_STATUS;
   15969 		adv_addr = I217_EEE_ADVERTISEMENT;
   15970 		break;
   15971 	default:
   15972 		return 0;
   15973 	}
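          	/*
          	 * On these PHYs the EEE registers are reached via the EMI
          	 * interface, so only the register addresses above differ by
          	 * PHY type.
          	 */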
   15974 
   15975 	if (sc->phy.acquire(sc)) {
   15976 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15977 		return 0;
   15978 	}
   15979 
   15980 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15981 	if (rv != 0)
   15982 		goto release;
   15983 
   15984 	/* Clear bits that enable EEE in various speeds */
   15985 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15986 
   15987 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15988 		/* Save off link partner's EEE ability */
   15989 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15990 		if (rv != 0)
   15991 			goto release;
   15992 
   15993 		/* Read EEE advertisement */
   15994 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15995 			goto release;
   15996 
   15997 		/*
   15998 		 * Enable EEE only for speeds in which the link partner is
   15999 		 * EEE capable and for which we advertise EEE.
   16000 		 */
   16001 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16002 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16003 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16004 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16005 			if ((data & ANLPAR_TX_FD) != 0)
   16006 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16007 			else {
   16008 				/*
   16009 				 * EEE is not supported in 100Half, so ignore
   16010 				 * partner's EEE in 100 ability if full-duplex
   16011 				 * is not advertised.
   16012 				 */
   16013 				sc->eee_lp_ability
   16014 				    &= ~AN_EEEADVERT_100_TX;
   16015 			}
   16016 		}
   16017 	}
   16018 
   16019 	if (sc->sc_phytype == WMPHY_82579) {
   16020 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16021 		if (rv != 0)
   16022 			goto release;
   16023 
   16024 		data &= ~I82579_LPI_PLL_SHUT_100;
   16025 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16026 	}
   16027 
   16028 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16029 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16030 		goto release;
   16031 
   16032 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16033 release:
   16034 	sc->phy.release(sc);
   16035 
   16036 	return rv;
   16037 }
   16038 
   16039 static int
   16040 wm_set_eee(struct wm_softc *sc)
   16041 {
   16042 	struct ethercom *ec = &sc->sc_ethercom;
   16043 
   16044 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16045 		return 0;
   16046 
   16047 	if (sc->sc_type == WM_T_I354) {
   16048 		/* I354 uses an external PHY */
   16049 		return 0; /* not yet */
   16050 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16051 		return wm_set_eee_i350(sc);
   16052 	else if (sc->sc_type >= WM_T_PCH2)
   16053 		return wm_set_eee_pchlan(sc);
   16054 
   16055 	return 0;
   16056 }
   16057 
   16058 /*
   16059  * Workarounds (mainly PHY related).
   16060  * Basically, PHY's workarounds are in the PHY drivers.
   16061  */
   16062 
   16063 /* Work-around for 82566 Kumeran PCS lock loss */
   16064 static int
   16065 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16066 {
   16067 	struct mii_data *mii = &sc->sc_mii;
   16068 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16069 	int i, reg, rv;
   16070 	uint16_t phyreg;
   16071 
   16072 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16073 		device_xname(sc->sc_dev), __func__));
   16074 
   16075 	/* If the link is not up, do nothing */
   16076 	if ((status & STATUS_LU) == 0)
   16077 		return 0;
   16078 
   16079 	/* Nothing to do if the link is other than 1Gbps */
   16080 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16081 		return 0;
   16082 
	for (i = 0; i < 10; i++) {
		/* read twice */
		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
		if (rv != 0)
			return rv;
		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
		if (rv != 0)
			return rv;

		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */

		/* Reset the PHY */
		wm_reset_phy(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

out:
	return 0;
}

/*
 *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 *  @sc: pointer to the HW structure
 *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
 *  LPLU, Gig disable, MDIC PHY reset):
 *    1) Set Kumeran Near-end loopback
 *    2) Clear Kumeran Near-end loopback
 *  Should only be called for ICH8[m] devices with any 1G Phy.
 */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmreg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
			return;
		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
			return;
		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
	}
}

/*
 * Workaround for PCH PHYs
 * XXX should be moved to new PHY driver?
 */
static int
wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
{
	device_t dev = sc->sc_dev;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	uint16_t phy_data, phyrev = 0;
	int phytype = sc->sc_phytype;
	int rv;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH);

	/* Set MDIO slow mode before any other MDIO access */
	if (phytype == WMPHY_82577)
		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
			return rv;

	child = LIST_FIRST(&mii->mii_phys);
	if (child != NULL)
		phyrev = child->mii_mpd_rev;

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
	if ((child != NULL) &&
	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
		((phytype == WMPHY_82578) && (phyrev == 1)))) {
		/* Disable generation of early preamble (0x4431) */
		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
		    &phy_data);
		if (rv != 0)
			return rv;
		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
		    phy_data);
		if (rv != 0)
			return rv;

		/* Preamble tuning for SSC */
		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
		if (rv != 0)
			return rv;
	}

	/* 82578 */
	if (phytype == WMPHY_82578) {
		/*
		 * Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
		 */
		if ((child != NULL) && (phyrev < 2)) {
			PHY_RESET(child);
			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
			if (rv != 0)
				return rv;
		}
	}

	/* Select page 0 */
	if ((rv = sc->phy.acquire(sc)) != 0)
		return rv;
	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
	sc->phy.release(sc);
	if (rv != 0)
		return rv;

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled while the link is at 1Gbps.
	 */
	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
		return rv;

	/* Workaround for link disconnects on a busy hub in half duplex */
	rv = sc->phy.acquire(sc);
	if (rv)
		return rv;
	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
	if (rv)
		goto release;
	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
	    phy_data & 0x00ff);
	if (rv)
		goto release;

	/* Set MSE higher to enable link to stay up when noise is high */
	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
release:
	sc->phy.release(sc);

	return rv;
}

/*
 *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 *  @sc:   pointer to the HW structure
 */
static void
wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
{

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->phy.acquire(sc) != 0)
		return;

	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);

	sc->phy.release(sc);
}

static void
wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
{
	device_t dev = sc->sc_dev;
	uint32_t mac_reg;
	uint16_t i, wuce;
	int count;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(dev), __func__));

	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
		return;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
	count = wm_rar_count(sc);
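	/*
	 * Each 32-bit MAC receive address register is copied as two 16-bit
	 * halves, since the BM PHY wakeup registers are only 16 bits wide.
	 */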
	for (i = 0; i < count; i++) {
		uint16_t lo, hi;
		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
		lo = (uint16_t)(mac_reg & 0xffff);
		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);

		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
		lo = (uint16_t)(mac_reg & 0xffff);
		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
	}

	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
}

/*
 *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 *  with 82579 PHY
 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
 */
static int
wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
{
	device_t dev = sc->sc_dev;
	int rar_count;
	int rv;
	uint32_t mac_reg;
	uint16_t dft_ctrl, data;
	uint16_t i;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(dev), __func__));

	if (sc->sc_type < WM_T_PCH2)
		return 0;

	/* Acquire PHY semaphore */
	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;

	/* Disable Rx path while enabling/disabling workaround */
	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
	if (rv != 0)
		goto out;
	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
	    dft_ctrl | (1 << 14));
	if (rv != 0)
		goto out;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		rar_count = wm_rar_count(sc);
		for (i = 0; i < rar_count; i++) {
			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
			uint32_t addr_high, addr_low;

			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
			if (!(addr_high & RAL_AV))
				continue;
			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

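			/*
			 * Initial CRC value for this address: the one's
			 * complement of the little-endian CRC-32 of the
			 * 6-byte station address.
			 */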
			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
		}

		/* Write Rx addresses to the PHY */
		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
	}

	/*
	 * If enable ==
	 *	true: Enable jumbo frame workaround in the MAC.
	 *	false: Write MAC register values back to h/w defaults.
	 */
	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
	if (enable) {
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
	} else
		mac_reg &= ~(0xf << 14);
	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);

	mac_reg = CSR_READ(sc, WMREG_RCTL);
	if (enable) {
		mac_reg |= RCTL_SECRC;
		sc->sc_rctl |= RCTL_SECRC;
		sc->sc_flags |= WM_F_CRC_STRIP;
	} else {
		mac_reg &= ~RCTL_SECRC;
		sc->sc_rctl &= ~RCTL_SECRC;
		sc->sc_flags &= ~WM_F_CRC_STRIP;
	}
	CSR_WRITE(sc, WMREG_RCTL, mac_reg);

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
	if (rv != 0)
		goto out;
	if (enable)
		data |= 1 << 0;
	else
		data &= ~(1 << 0);
	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
	if (rv != 0)
		goto out;

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
	if (rv != 0)
		goto out;
	/*
	 * XXX FreeBSD and Linux do the same thing here: they write the same
	 * value in both the enable case and the disable case. Is that
	 * correct?
	 */
	data &= ~(0xf << 8);
	data |= (0xb << 8);
	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
	if (rv != 0)
		goto out;

	/*
	 * If enable ==
	 *	true: Enable jumbo frame workaround in the PHY.
	 *	false: Write PHY register values back to h/w defaults.
	 */
	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
	if (rv != 0)
		goto out;
	data &= ~(0x7F << 5);
	if (enable)
		data |= (0x37 << 5);
	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
	if (rv != 0)
		goto out;
	if (enable)
		data &= ~(1 << 13);
	else
		data |= (1 << 13);
	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
	if (rv != 0)
		goto out;
	data &= ~(0x3FF << 2);
	if (enable)
		data |= (I82579_TX_PTR_GAP << 2);
	else
		data |= (0x8 << 2);
	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
	if (rv != 0)
		goto out;

	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
	    enable ? 0xf100 : 0x7e00);
	if (rv != 0)
		goto out;

	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
	if (rv != 0)
		goto out;
	if (enable)
		data |= 1 << 10;
	else
		data &= ~(1 << 10);
	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
	if (rv != 0)
		goto out;

	/* Re-enable Rx path after enabling/disabling workaround */
	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
	    dft_ctrl & ~(1 << 14));

out:
	sc->phy.release(sc);

	return rv;
}

/*
 *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 */
static int
wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
{
	device_t dev = sc->sc_dev;
	int rv;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH2);

	/* Set MDIO slow mode before any other MDIO access */
	rv = wm_set_mdio_slow_mode_hv(sc);
	if (rv != 0)
		return rv;

	rv = sc->phy.acquire(sc);
	if (rv != 0)
		return rv;
	/* Set MSE higher to enable link to stay up when noise is high */
	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
	if (rv != 0)
		goto release;
	/* Drop link after 5 times MSE threshold was reached */
	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
release:
	sc->phy.release(sc);

	return rv;
}

/**
 *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @sc: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Work around the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 **/
static int
wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
{
	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
	uint32_t status = CSR_READ(sc, WMREG_STATUS);
	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
	uint16_t phyreg;

	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
		if (rv != 0)
			goto release;
		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
		if (rv != 0)
			goto release;
		delay(20);
		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);

		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
release:
		sc->phy.release(sc);
		return rv;
	}

	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;

	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (((child != NULL) && (child->mii_mpd_rev > 5))
	    || !link
	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
		goto update_fextnvm6;

	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);

	/* Clear link status transmit timeout */
	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
	if (speed == STATUS_SPEED_100) {
		/* Set inband Tx timeout to 5x10us for 100Half */
		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

		/* Do not extend the K1 entry latency for 100Half */
		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
	} else {
		/* Set inband Tx timeout to 50x10us for 10Full/Half */
		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

		/* Extend the K1 entry latency for 10 Mbps */
		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
	}

	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);

update_fextnvm6:
	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
	return 0;
}

/*
 *  wm_k1_gig_workaround_hv - K1 Si workaround
 *  @sc:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever the link is
 *  at 1Gbps.  If the link is down, the function restores the default K1
 *  setting located in the NVM.
 */
static int
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->phy.acquire(sc) != 0)
		return -1;

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);
	sc->phy.release(sc);

	return 0;
}

/*
 *  wm_k1_workaround_lv - K1 Si workaround
 *  @sc:   pointer to the HW structure
 *
 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
 *  Disable K1 for 1000 and 100 speeds.
 */
static int
wm_k1_workaround_lv(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t phyreg;
	int rv;

	if (sc->sc_type != WM_T_PCH2)
		return 0;

	/* Set K1 beacon duration based on 10Mbps speed */
	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
	if (rv != 0)
		return rv;

	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (phyreg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
			    &phyreg);
			if (rv != 0)
				return rv;
			phyreg &= ~HV_PM_CTRL_K1_ENA;
			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
			    phyreg);
			if (rv != 0)
				return rv;
		} else {
			/* For 10Mbps */
			reg = CSR_READ(sc, WMREG_FEXTNVM4);
			reg &= ~FEXTNVM4_BEACON_DURATION;
			reg |= FEXTNVM4_BEACON_DURATION_16US;
			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
		}
	}

	return 0;
}

/*
 *  wm_link_stall_workaround_hv - Si workaround
 *  @sc: pointer to the HW structure
 *
 *  This function works around a Si bug where the link partner can get
 *  a link up indication before the PHY does. If small packets are sent
 *  by the link partner they can be placed in the packet buffer without
 *  being properly accounted for by the PHY and will stall preventing
 *  further packets from being received.  The workaround is to clear the
 *  packet buffer after the PHY detects link up.
 */
static int
wm_link_stall_workaround_hv(struct wm_softc *sc)
{
	uint16_t phyreg;

	if (sc->sc_phytype != WMPHY_82578)
		return 0;

	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
	if ((phyreg & BMCR_LOOP) != 0)
		return 0;

	/* Check if link is up and at 1Gbps */
	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
	    | BM_CS_STATUS_SPEED_MASK;
	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
		| BM_CS_STATUS_SPEED_1000))
		return 0;

	delay(200 * 1000);	/* XXX too big */

	/* Flush the packets in the fifo buffer */
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
	    HV_MUX_DATA_CTRL_GEN_TO_MAC);

	return 0;
}

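/*
 * Put the Kumeran interface into MDIO slow mode.  As noted in the
 * callers, this must be done before any other MDIO access on some PCH
 * PHYs (82577, and during PCH2 PHY workarounds).
 */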
static int
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	int rv;
	uint16_t reg;

	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
	if (rv != 0)
		return rv;

	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

/*
 *  wm_configure_k1_ich8lan - Configure K1 power state
 *  @sc: pointer to the HW structure
 *  @enable: K1 state to configure
 *
 *  Configure the K1 power state based on the provided parameter.
 *  Assumes semaphore already acquired.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmreg;
	int rv;

	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
	if (rv != 0)
		return;

	if (k1_enable)
		kmreg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmreg &= ~KUMCTRLSTA_K1_ENABLE;

	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
	if (rv != 0)
		return;

	delay(20);

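	/*
	 * Briefly force the MAC speed with the speed-bypass bit set, then
	 * restore the original CTRL/CTRL_EXT values; this is what lets the
	 * new K1 setting take effect.
	 */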
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if (sc->sc_type != WM_T_82580)
		return;
	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t id1, id2;
	int i, rv;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

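	/*
	 * Try reading the PHY ID up to twice; the first MDIO access after
	 * a reset or a mode change may fail or return an invalid ID.
	 */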
	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
		    &id1);
		if ((rv != 0) || MII_INVALIDID(id1))
			continue;
		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
		    &id2);
		if ((rv != 0) || MII_INVALIDID(id2))
			continue;
		break;
	}
	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	/*
	 * In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	rv = 0;
	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
		sc->phy.acquire(sc);
	}
	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev, "XXX return with false\n");
		return false;
	}
out:
	if (sc->sc_type >= WM_T_PCH_LPT) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			uint16_t phyreg;

			/* Unforce SMBus mode in PHY */
			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, &phyreg);
			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, phyreg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
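		/*
		 * The latency computed below is the time to drain the Rx
		 * packet buffer (rxa is in KB) less two maximum-sized
		 * frames, at line rate: bytes * 8 * 1000 / speed(Mb/s)
		 * yields nanoseconds.
		 */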
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
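		/*
		 * Illustrative example (hypothetical numbers): 66,000 ns
		 * does not fit in the 10-bit value field, so the loop above
		 * divides it by 32 twice, giving scale = 2 (units of 2^10
		 * ns) and value = 65, i.e. an encoded latency of about
		 * 65 * 1024 = 66,560 ns (the division rounds up).
		 */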
		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 *
 * Note that this function is called on both FLASH and iNVM case on NetBSD.
 */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
		/*
		 * The default value of the Initialization Control Word 1
		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
		 */
		nvmword = INVM_DEFAULT_AL;
	}
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

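		/*
		 * Bounce the function through D3hot and back to D0,
		 * presumably so the PLL is re-initialized from the patched
		 * autoload word written above.
		 */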
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}

/* Sysctl functions */
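/*
 * The read-only handlers below follow the usual NetBSD sysctl pattern:
 * copy the node, repoint sysctl_data at a local snapshot of the hardware
 * register and hand it to sysctl_lookup(), so the register is re-read on
 * every query.
 */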
static int
wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

#ifdef WM_DEBUG
static int
wm_sysctl_debug(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
	uint32_t dflags;
	int error;

	dflags = sc->sc_debug;
	node.sysctl_data = &dflags;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	sc->sc_debug = dflags;
	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));

	return 0;
}
#endif