/*	$NetBSD: if_wm.c,v 1.739 2022/07/11 06:15:27 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.739 2022/07/11 06:15:27 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
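
/*
 * Example (illustrative): debug output is gated per-device by the
 * sc_debug bits, e.g.:
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: LINK: set media -> link up\n",
 *		device_xname(sc->sc_dev)));
 */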

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver may use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;
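
/*
 * Note (illustrative): these defaults can be overridden at build time,
 * e.g. "options WM_DISABLE_MSI=1" in the kernel config (assuming the
 * option is wired through opt_if_wm.h), and since they are plain
 * globals they can also be patched at run time from ddb(4).
 */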

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we manage up to 64 (16 for the i82547) of them at
 * a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological mbuf
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it (see the sketch below).
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
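
/*
 * Illustrative sketch (not compiled): how a chain with more than
 * WM_NTXSEGS DMA segments is coalesced before being loaded.  The real
 * logic lives in wm_send_common_locked()/wm_nq_send_common_locked();
 * the helper name below is an assumption for illustration only.
 */
#if 0
static int
wm_load_txbuf_sketch(struct wm_softc *sc, struct wm_txsoft *txs,
    struct mbuf **mp)
{
	int error;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txs->txs_dmamap, *mp,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many DMA segments: coalesce the chain and retry. */
		struct mbuf *m = m_defrag(*mp, M_NOWAIT);
		if (m == NULL)
			return ENOBUFS;
		*mp = m;
		error = bus_dmamap_load_mbuf(sc->sc_dmat, txs->txs_dmamap,
		    *mp, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	}
	return error;
}
#endif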

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for
 * normal-sized packets.  Jumbo packets consume 5 Rx buffers for a
 * full-sized packet.  We allocate 256 receive descriptors, each with
 * a 2k buffer (MCLBYTES), which gives us room for about 50 jumbo
 * packets (see the worked example below).
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
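
/*
 * Worked example (assuming a 9018 byte jumbo frame):
 *	howmany(9018, MCLBYTES) == howmany(9018, 2048) == 5 buffers,
 * so the 256-entry ring has room for roughly 256 / 5, i.e. about 50,
 * such frames at once.
 */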

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
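
/*
 * Example (illustrative): WM_Q_EVCNT_DEFINE(txq, tso) declares the
 * pair txq_tso_evcnt_name[]/txq_ev_tso, and
 * WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, 0, xname) then registers the
 * counter under the name "txq00tso", visible with vmstat -e.
 */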

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This pcq mediates between them without blocking (see the
	 * illustrative sketch after this struct).
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};
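
/*
 * Illustrative sketch (not compiled): how a shared Tx queue is fed
 * through the pcq(9) above without blocking.  The real logic lives in
 * wm_transmit()/wm_nq_transmit(); the helper name is an assumption.
 */
#if 0
static int
wm_transmit_sketch(struct ifnet *ifp, struct mbuf *m)
{
	struct wm_softc *sc = ifp->if_softc;
	int qid = wm_select_txqueue(ifp, m);
	struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;

	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
		m_freem(m);	/* pcq full: drop and count pcqdrop */
		return ENOBUFS;
	}

	/*
	 * If the queue lock is free, drain the pcq onto the descriptor
	 * ring now; otherwise the current lock holder (or the softint)
	 * will pick the packet up.
	 */
	if (mutex_tryenter(txq->txq_lock)) {
		if (!txq->txq_stopping)
			wm_transmit_locked(ifp, txq);
		mutex_exit(txq->txq_lock);
	}
	return 0;
}
#endif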

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* Name for sysctl */

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
#ifdef WM_DEBUG
	uint32_t sc_debug;
#endif
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
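
/*
 * Example (illustrative): wm_rxeof() links each 2k buffer of a
 * multi-buffer packet with WM_RXCHAIN_LINK(rxq, m); once the
 * descriptor marked end-of-packet arrives, rxq_head is the completed
 * packet and WM_RXCHAIN_RESET(rxq) starts the next chain.
 */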

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif
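
/*
 * Note: the atomic_{load,store}_relaxed() pair above gives tear-free
 * 64-bit updates, not an atomic read-modify-write; racing updaters
 * could lose counts, which is acceptable for statistics.
 */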

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
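
/*
 * Example (illustrative): the LO/HI split above is how the 64-bit
 * descriptor ring base is programmed into the paired base registers,
 * roughly:
 *
 *	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
 */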

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1339 	  "Intel i82801H (AMT) LAN Controller",
   1340 	  WM_T_ICH8,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1342 	  "Intel i82801H LAN Controller",
   1343 	  WM_T_ICH8,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1345 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1346 	  WM_T_ICH8,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1348 	  "Intel i82801H (M) LAN Controller",
   1349 	  WM_T_ICH8,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1351 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1352 	  WM_T_ICH8,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1354 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1355 	  WM_T_ICH8,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1357 	  "82567V-3 LAN Controller",
   1358 	  WM_T_ICH8,		WMP_F_COPPER },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1360 	  "82801I (AMT) LAN Controller",
   1361 	  WM_T_ICH9,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1363 	  "82801I 10/100 LAN Controller",
   1364 	  WM_T_ICH9,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1366 	  "82801I (G) 10/100 LAN Controller",
   1367 	  WM_T_ICH9,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1369 	  "82801I (GT) 10/100 LAN Controller",
   1370 	  WM_T_ICH9,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1372 	  "82801I (C) LAN Controller",
   1373 	  WM_T_ICH9,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1375 	  "82801I mobile LAN Controller",
   1376 	  WM_T_ICH9,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1378 	  "82801I mobile (V) LAN Controller",
   1379 	  WM_T_ICH9,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1381 	  "82801I mobile (AMT) LAN Controller",
   1382 	  WM_T_ICH9,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1384 	  "82567LM-4 LAN Controller",
   1385 	  WM_T_ICH9,		WMP_F_COPPER },
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1387 	  "82567LM-2 LAN Controller",
   1388 	  WM_T_ICH10,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1390 	  "82567LF-2 LAN Controller",
   1391 	  WM_T_ICH10,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1393 	  "82567LM-3 LAN Controller",
   1394 	  WM_T_ICH10,		WMP_F_COPPER },
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1396 	  "82567LF-3 LAN Controller",
   1397 	  WM_T_ICH10,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1399 	  "82567V-2 LAN Controller",
   1400 	  WM_T_ICH10,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1402 	  "82567V-3? LAN Controller",
   1403 	  WM_T_ICH10,		WMP_F_COPPER },
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1405 	  "HANKSVILLE LAN Controller",
   1406 	  WM_T_ICH10,		WMP_F_COPPER },
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1408 	  "PCH LAN (82577LM) Controller",
   1409 	  WM_T_PCH,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1411 	  "PCH LAN (82577LC) Controller",
   1412 	  WM_T_PCH,		WMP_F_COPPER },
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1414 	  "PCH LAN (82578DM) Controller",
   1415 	  WM_T_PCH,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1417 	  "PCH LAN (82578DC) Controller",
   1418 	  WM_T_PCH,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1420 	  "PCH2 LAN (82579LM) Controller",
   1421 	  WM_T_PCH2,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1423 	  "PCH2 LAN (82579V) Controller",
   1424 	  WM_T_PCH2,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1426 	  "82575EB dual-1000baseT Ethernet",
   1427 	  WM_T_82575,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1429 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1430 	  WM_T_82575,		WMP_F_SERDES },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1432 	  "82575GB quad-1000baseT Ethernet",
   1433 	  WM_T_82575,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1435 	  "82575GB quad-1000baseT Ethernet (PM)",
   1436 	  WM_T_82575,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1438 	  "82576 1000BaseT Ethernet",
   1439 	  WM_T_82576,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1441 	  "82576 1000BaseX Ethernet",
   1442 	  WM_T_82576,		WMP_F_FIBER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1445 	  "82576 gigabit Ethernet (SERDES)",
   1446 	  WM_T_82576,		WMP_F_SERDES },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1449 	  "82576 quad-1000BaseT Ethernet",
   1450 	  WM_T_82576,		WMP_F_COPPER },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1453 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1454 	  WM_T_82576,		WMP_F_COPPER },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1457 	  "82576 gigabit Ethernet",
   1458 	  WM_T_82576,		WMP_F_COPPER },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1461 	  "82576 gigabit Ethernet (SERDES)",
   1462 	  WM_T_82576,		WMP_F_SERDES },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1464 	  "82576 quad-gigabit Ethernet (SERDES)",
   1465 	  WM_T_82576,		WMP_F_SERDES },
   1466 
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1468 	  "82580 1000BaseT Ethernet",
   1469 	  WM_T_82580,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1471 	  "82580 1000BaseX Ethernet",
   1472 	  WM_T_82580,		WMP_F_FIBER },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1475 	  "82580 1000BaseT Ethernet (SERDES)",
   1476 	  WM_T_82580,		WMP_F_SERDES },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1479 	  "82580 gigabit Ethernet (SGMII)",
   1480 	  WM_T_82580,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1482 	  "82580 dual-1000BaseT Ethernet",
   1483 	  WM_T_82580,		WMP_F_COPPER },
   1484 
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1486 	  "82580 quad-1000BaseX Ethernet",
   1487 	  WM_T_82580,		WMP_F_FIBER },
   1488 
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1490 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1491 	  WM_T_82580,		WMP_F_COPPER },
   1492 
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1494 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1495 	  WM_T_82580,		WMP_F_SERDES },
   1496 
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1498 	  "DH89XXCC 1000BASE-KX Ethernet",
   1499 	  WM_T_82580,		WMP_F_SERDES },
   1500 
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1502 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1503 	  WM_T_82580,		WMP_F_SERDES },
   1504 
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1506 	  "I350 Gigabit Network Connection",
   1507 	  WM_T_I350,		WMP_F_COPPER },
   1508 
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1510 	  "I350 Gigabit Fiber Network Connection",
   1511 	  WM_T_I350,		WMP_F_FIBER },
   1512 
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1514 	  "I350 Gigabit Backplane Connection",
   1515 	  WM_T_I350,		WMP_F_SERDES },
   1516 
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1518 	  "I350 Quad Port Gigabit Ethernet",
   1519 	  WM_T_I350,		WMP_F_SERDES },
   1520 
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1522 	  "I350 Gigabit Connection",
   1523 	  WM_T_I350,		WMP_F_COPPER },
   1524 
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1526 	  "I354 Gigabit Ethernet (KX)",
   1527 	  WM_T_I354,		WMP_F_SERDES },
   1528 
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1530 	  "I354 Gigabit Ethernet (SGMII)",
   1531 	  WM_T_I354,		WMP_F_COPPER },
   1532 
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1534 	  "I354 Gigabit Ethernet (2.5G)",
   1535 	  WM_T_I354,		WMP_F_COPPER },
   1536 
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1538 	  "I210-T1 Ethernet Server Adapter",
   1539 	  WM_T_I210,		WMP_F_COPPER },
   1540 
   1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1542 	  "I210 Ethernet (Copper OEM)",
   1543 	  WM_T_I210,		WMP_F_COPPER },
   1544 
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1546 	  "I210 Ethernet (Copper IT)",
   1547 	  WM_T_I210,		WMP_F_COPPER },
   1548 
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1550 	  "I210 Ethernet (Copper, FLASH less)",
   1551 	  WM_T_I210,		WMP_F_COPPER },
   1552 
   1553 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1554 	  "I210 Gigabit Ethernet (Fiber)",
   1555 	  WM_T_I210,		WMP_F_FIBER },
   1556 
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1558 	  "I210 Gigabit Ethernet (SERDES)",
   1559 	  WM_T_I210,		WMP_F_SERDES },
   1560 
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1562 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1563 	  WM_T_I210,		WMP_F_SERDES },
   1564 
   1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1566 	  "I210 Gigabit Ethernet (SGMII)",
   1567 	  WM_T_I210,		WMP_F_COPPER },
   1568 
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1570 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1571 	  WM_T_I210,		WMP_F_COPPER },
   1572 
   1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1574 	  "I211 Ethernet (COPPER)",
   1575 	  WM_T_I211,		WMP_F_COPPER },
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1577 	  "I217 V Ethernet Connection",
   1578 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1580 	  "I217 LM Ethernet Connection",
   1581 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1583 	  "I218 V Ethernet Connection",
   1584 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1585 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1586 	  "I218 V Ethernet Connection",
   1587 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1589 	  "I218 V Ethernet Connection",
   1590 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1592 	  "I218 LM Ethernet Connection",
   1593 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1595 	  "I218 LM Ethernet Connection",
   1596 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1597 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1598 	  "I218 LM Ethernet Connection",
   1599 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1601 	  "I219 LM Ethernet Connection",
   1602 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1604 	  "I219 LM (2) Ethernet Connection",
   1605 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1607 	  "I219 LM (3) Ethernet Connection",
   1608 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1609 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1610 	  "I219 LM (4) Ethernet Connection",
   1611 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1613 	  "I219 LM (5) Ethernet Connection",
   1614 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1616 	  "I219 LM (6) Ethernet Connection",
   1617 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1619 	  "I219 LM (7) Ethernet Connection",
   1620 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1622 	  "I219 LM (8) Ethernet Connection",
   1623 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1625 	  "I219 LM (9) Ethernet Connection",
   1626 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1628 	  "I219 LM (10) Ethernet Connection",
   1629 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1631 	  "I219 LM (11) Ethernet Connection",
   1632 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1634 	  "I219 LM (12) Ethernet Connection",
   1635 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1637 	  "I219 LM (13) Ethernet Connection",
   1638 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1640 	  "I219 LM (14) Ethernet Connection",
   1641 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1643 	  "I219 LM (15) Ethernet Connection",
   1644 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
   1646 	  "I219 LM (16) Ethernet Connection",
   1647 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
   1649 	  "I219 LM (17) Ethernet Connection",
   1650 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
   1652 	  "I219 LM (18) Ethernet Connection",
   1653 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
   1655 	  "I219 LM (19) Ethernet Connection",
   1656 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1658 	  "I219 V Ethernet Connection",
   1659 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1661 	  "I219 V (2) Ethernet Connection",
   1662 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1664 	  "I219 V (4) Ethernet Connection",
   1665 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1667 	  "I219 V (5) Ethernet Connection",
   1668 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1670 	  "I219 V (6) Ethernet Connection",
   1671 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1673 	  "I219 V (7) Ethernet Connection",
   1674 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1675 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1676 	  "I219 V (8) Ethernet Connection",
   1677 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1679 	  "I219 V (9) Ethernet Connection",
   1680 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1682 	  "I219 V (10) Ethernet Connection",
   1683 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1685 	  "I219 V (11) Ethernet Connection",
   1686 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1687 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1688 	  "I219 V (12) Ethernet Connection",
   1689 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1691 	  "I219 V (13) Ethernet Connection",
   1692 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1694 	  "I219 V (14) Ethernet Connection",
   1695 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
   1697 	  "I219 V (15) Ethernet Connection",
   1698 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1699 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
   1700 	  "I219 V (16) Ethernet Connection",
   1701 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
   1703 	  "I219 V (17) Ethernet Connection",
   1704 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
   1706 	  "I219 V (18) Ethernet Connection",
   1707 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
   1709 	  "I219 V (19) Ethernet Connection",
   1710 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1711 	{ 0,			0,
   1712 	  NULL,
   1713 	  0,			0 },
   1714 };
   1715 
   1716 /*
   1717  * Register read/write functions.
   1718  * Other than CSR_{READ|WRITE}().
   1719  */
   1720 
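         /*
          * i8254x parts that implement an I/O BAR use a simple indirect
          * scheme: the target register offset is written at I/O offset 0
          * (IOADDR) and the data is then transferred through I/O offset 4
          * (IODATA).
          */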
   1721 #if 0 /* Not currently used */
   1722 static inline uint32_t
   1723 wm_io_read(struct wm_softc *sc, int reg)
   1724 {
   1725 
   1726 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1727 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1728 }
   1729 #endif
   1730 
   1731 static inline void
   1732 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1733 {
   1734 
   1735 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1736 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1737 }
   1738 
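         /*
          * Write an 8-bit value to an 8-bit-wide sub-register behind an
          * indexed control register ('reg'): the data and sub-register
          * offset are combined into a single write, and the register is
          * then polled until the controller sets the ready bit (or the
          * poll times out).
          */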
   1739 static inline void
   1740 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1741     uint32_t data)
   1742 {
   1743 	uint32_t regval;
   1744 	int i;
   1745 
   1746 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1747 
   1748 	CSR_WRITE(sc, reg, regval);
   1749 
   1750 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1751 		delay(5);
   1752 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1753 			break;
   1754 	}
   1755 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1756 		aprint_error("%s: WARNING:"
   1757 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1758 		    device_xname(sc->sc_dev), reg);
   1759 	}
   1760 }
   1761 
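         /* Load a 64-bit DMA address into a descriptor address pair (LE). */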
   1762 static inline void
   1763 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1764 {
   1765 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
   1766 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
   1767 }
   1768 
   1769 /*
   1770  * Descriptor sync/init functions.
   1771  */
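         /* Sync 'num' Tx descriptors starting at 'start', handling ring wrap. */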
   1772 static inline void
   1773 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1774 {
   1775 	struct wm_softc *sc = txq->txq_sc;
   1776 
   1777 	/* If it will wrap around, sync to the end of the ring. */
   1778 	if ((start + num) > WM_NTXDESC(txq)) {
   1779 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1780 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1781 		    (WM_NTXDESC(txq) - start), ops);
   1782 		num -= (WM_NTXDESC(txq) - start);
   1783 		start = 0;
   1784 	}
   1785 
   1786 	/* Now sync whatever is left. */
   1787 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1788 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1789 }
   1790 
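         /* Sync a single Rx descriptor. */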
   1791 static inline void
   1792 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1793 {
   1794 	struct wm_softc *sc = rxq->rxq_sc;
   1795 
   1796 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1797 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1798 }
   1799 
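         /*
          * (Re)initialize the Rx descriptor at 'start' and hand it back to
          * the chip by advancing the ring tail (RDT).  Three descriptor
          * layouts are handled: extended (82574), advanced/newqueue (82575
          * and later) and legacy wiseman descriptors.
          */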
   1800 static inline void
   1801 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1802 {
   1803 	struct wm_softc *sc = rxq->rxq_sc;
   1804 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1805 	struct mbuf *m = rxs->rxs_mbuf;
   1806 
   1807 	/*
   1808 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1809 	 * so that the payload after the Ethernet header is aligned
   1810 	 * to a 4-byte boundary.
   1811 
    1812 	 *
   1813 	 * The stupid chip uses the same size for every buffer, which
   1814 	 * is set in the Receive Control register.  We are using the 2K
   1815 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1816 	 * reason, we can't "scoot" packets longer than the standard
   1817 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1818 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1819 	 * the upper layer copy the headers.
   1820 	 */
   1821 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1822 
   1823 	if (sc->sc_type == WM_T_82574) {
   1824 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1825 		rxd->erx_data.erxd_addr =
   1826 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1827 		rxd->erx_data.erxd_dd = 0;
   1828 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1829 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1830 
   1831 		rxd->nqrx_data.nrxd_paddr =
   1832 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1833 		/* Currently, split header is not supported. */
   1834 		rxd->nqrx_data.nrxd_haddr = 0;
   1835 	} else {
   1836 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1837 
   1838 		wm_set_dma_addr(&rxd->wrx_addr,
   1839 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1840 		rxd->wrx_len = 0;
   1841 		rxd->wrx_cksum = 0;
   1842 		rxd->wrx_status = 0;
   1843 		rxd->wrx_errors = 0;
   1844 		rxd->wrx_special = 0;
   1845 	}
   1846 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1847 
   1848 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1849 }
   1850 
   1851 /*
   1852  * Device driver interface functions and commonly used functions.
   1853  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1854  */
   1855 
   1856 /* Lookup supported device table */
   1857 static const struct wm_product *
   1858 wm_lookup(const struct pci_attach_args *pa)
   1859 {
   1860 	const struct wm_product *wmp;
   1861 
   1862 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1863 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1864 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1865 			return wmp;
   1866 	}
   1867 	return NULL;
   1868 }
   1869 
   1870 /* The match function (ca_match) */
   1871 static int
   1872 wm_match(device_t parent, cfdata_t cf, void *aux)
   1873 {
   1874 	struct pci_attach_args *pa = aux;
   1875 
   1876 	if (wm_lookup(pa) != NULL)
   1877 		return 1;
   1878 
   1879 	return 0;
   1880 }
   1881 
   1882 /* The attach function (ca_attach) */
   1883 static void
   1884 wm_attach(device_t parent, device_t self, void *aux)
   1885 {
   1886 	struct wm_softc *sc = device_private(self);
   1887 	struct pci_attach_args *pa = aux;
   1888 	prop_dictionary_t dict;
   1889 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1890 	pci_chipset_tag_t pc = pa->pa_pc;
   1891 	int counts[PCI_INTR_TYPE_SIZE];
   1892 	pci_intr_type_t max_type;
   1893 	const char *eetype, *xname;
   1894 	bus_space_tag_t memt;
   1895 	bus_space_handle_t memh;
   1896 	bus_size_t memsize;
   1897 	int memh_valid;
   1898 	int i, error;
   1899 	const struct wm_product *wmp;
   1900 	prop_data_t ea;
   1901 	prop_number_t pn;
   1902 	uint8_t enaddr[ETHER_ADDR_LEN];
   1903 	char buf[256];
   1904 	char wqname[MAXCOMLEN];
   1905 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1906 	pcireg_t preg, memtype;
   1907 	uint16_t eeprom_data, apme_mask;
   1908 	bool force_clear_smbi;
   1909 	uint32_t link_mode;
   1910 	uint32_t reg;
   1911 
   1912 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
   1913 	sc->sc_debug = WM_DEBUG_DEFAULT;
   1914 #endif
   1915 	sc->sc_dev = self;
   1916 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
   1917 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
   1918 	sc->sc_core_stopping = false;
   1919 
   1920 	wmp = wm_lookup(pa);
   1921 #ifdef DIAGNOSTIC
   1922 	if (wmp == NULL) {
   1923 		printf("\n");
   1924 		panic("wm_attach: impossible");
   1925 	}
   1926 #endif
   1927 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1928 
   1929 	sc->sc_pc = pa->pa_pc;
   1930 	sc->sc_pcitag = pa->pa_tag;
   1931 
   1932 	if (pci_dma64_available(pa)) {
   1933 		aprint_verbose(", 64-bit DMA");
   1934 		sc->sc_dmat = pa->pa_dmat64;
   1935 	} else {
   1936 		aprint_verbose(", 32-bit DMA");
   1937 		sc->sc_dmat = pa->pa_dmat;
   1938 	}
   1939 
   1940 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1941 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1942 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1943 
   1944 	sc->sc_type = wmp->wmp_type;
   1945 
   1946 	/* Set default function pointers */
   1947 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1948 	sc->phy.release = sc->nvm.release = wm_put_null;
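         	/* PHY reset hold time: 100us on 82571 and newer, 10ms on older chips */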
   1949 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1950 
   1951 	if (sc->sc_type < WM_T_82543) {
   1952 		if (sc->sc_rev < 2) {
   1953 			aprint_error_dev(sc->sc_dev,
   1954 			    "i82542 must be at least rev. 2\n");
   1955 			return;
   1956 		}
   1957 		if (sc->sc_rev < 3)
   1958 			sc->sc_type = WM_T_82542_2_0;
   1959 	}
   1960 
   1961 	/*
   1962 	 * Disable MSI for Errata:
   1963 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1964 	 *
   1965 	 *  82544: Errata 25
   1966 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1967 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1968 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1969 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1970 	 *
   1971 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1972 	 *
   1973 	 *  82571 & 82572: Errata 63
   1974 	 */
   1975 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1976 	    || (sc->sc_type == WM_T_82572))
   1977 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1978 
   1979 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1980 	    || (sc->sc_type == WM_T_82580)
   1981 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1982 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1983 		sc->sc_flags |= WM_F_NEWQUEUE;
   1984 
   1985 	/* Set device properties (mactype) */
   1986 	dict = device_properties(sc->sc_dev);
   1987 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1988 
   1989 	/*
    1990 	 * Map the device.  All devices support memory-mapped access,
   1991 	 * and it is really required for normal operation.
   1992 	 */
   1993 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1994 	switch (memtype) {
   1995 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1996 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1997 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1998 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1999 		break;
   2000 	default:
   2001 		memh_valid = 0;
   2002 		break;
   2003 	}
   2004 
   2005 	if (memh_valid) {
   2006 		sc->sc_st = memt;
   2007 		sc->sc_sh = memh;
   2008 		sc->sc_ss = memsize;
   2009 	} else {
   2010 		aprint_error_dev(sc->sc_dev,
   2011 		    "unable to map device registers\n");
   2012 		return;
   2013 	}
   2014 
   2015 	/*
   2016 	 * In addition, i82544 and later support I/O mapped indirect
   2017 	 * register access.  It is not desirable (nor supported in
   2018 	 * this driver) to use it for normal operation, though it is
   2019 	 * required to work around bugs in some chip versions.
   2020 	 */
   2021 	switch (sc->sc_type) {
   2022 	case WM_T_82544:
   2023 	case WM_T_82541:
   2024 	case WM_T_82541_2:
   2025 	case WM_T_82547:
   2026 	case WM_T_82547_2:
   2027 		/* First we have to find the I/O BAR. */
   2028 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   2029 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   2030 			if (memtype == PCI_MAPREG_TYPE_IO)
   2031 				break;
   2032 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   2033 			    PCI_MAPREG_MEM_TYPE_64BIT)
   2034 				i += 4;	/* skip high bits, too */
   2035 		}
   2036 		if (i < PCI_MAPREG_END) {
   2037 			/*
    2038 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    2039 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    2040 			 * That's not a problem, because those newer chips
    2041 			 * don't have this bug.
    2042 			 *
    2043 			 * The i8254x apparently doesn't respond when the
    2044 			 * I/O BAR is 0, which looks somewhat like it hasn't
    2045 			 * been configured.
   2046 			 */
   2047 			preg = pci_conf_read(pc, pa->pa_tag, i);
   2048 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   2049 				aprint_error_dev(sc->sc_dev,
   2050 				    "WARNING: I/O BAR at zero.\n");
   2051 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   2052 					0, &sc->sc_iot, &sc->sc_ioh,
   2053 					NULL, &sc->sc_ios) == 0) {
   2054 				sc->sc_flags |= WM_F_IOH_VALID;
   2055 			} else
   2056 				aprint_error_dev(sc->sc_dev,
   2057 				    "WARNING: unable to map I/O space\n");
   2058 		}
   2059 		break;
   2060 	default:
   2061 		break;
   2062 	}
   2063 
   2064 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   2065 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   2066 	preg |= PCI_COMMAND_MASTER_ENABLE;
   2067 	if (sc->sc_type < WM_T_82542_2_1)
   2068 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   2069 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   2070 
   2071 	/* Power up chip */
   2072 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   2073 	    && error != EOPNOTSUPP) {
   2074 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   2075 		return;
   2076 	}
   2077 
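         	/*
         	 * Use the number of available MSI-X vectors to decide how many
         	 * Tx/Rx queue pairs to use.
         	 */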
   2078 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   2079 	/*
    2080 	 *  Don't use MSI-X if we can use only one queue, to save
    2081 	 * interrupt resources.
   2082 	 */
   2083 	if (sc->sc_nqueues > 1) {
   2084 		max_type = PCI_INTR_TYPE_MSIX;
   2085 		/*
    2086 		 *  The 82583 has an MSI-X capability in its PCI configuration
    2087 		 * space, but it doesn't actually support MSI-X. At least the
    2088 		 * documentation doesn't say anything about MSI-X.
   2089 		 */
   2090 		counts[PCI_INTR_TYPE_MSIX]
   2091 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   2092 	} else {
   2093 		max_type = PCI_INTR_TYPE_MSI;
   2094 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2095 	}
   2096 
   2097 	/* Allocation settings */
   2098 	counts[PCI_INTR_TYPE_MSI] = 1;
   2099 	counts[PCI_INTR_TYPE_INTX] = 1;
   2100 	/* overridden by disable flags */
   2101 	if (wm_disable_msi != 0) {
   2102 		counts[PCI_INTR_TYPE_MSI] = 0;
   2103 		if (wm_disable_msix != 0) {
   2104 			max_type = PCI_INTR_TYPE_INTX;
   2105 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2106 		}
   2107 	} else if (wm_disable_msix != 0) {
   2108 		max_type = PCI_INTR_TYPE_MSI;
   2109 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2110 	}
   2111 
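         	/* Allocate interrupts, falling back from MSI-X to MSI to INTx. */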
   2112 alloc_retry:
   2113 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2114 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2115 		return;
   2116 	}
   2117 
   2118 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2119 		error = wm_setup_msix(sc);
   2120 		if (error) {
   2121 			pci_intr_release(pc, sc->sc_intrs,
   2122 			    counts[PCI_INTR_TYPE_MSIX]);
   2123 
   2124 			/* Setup for MSI: Disable MSI-X */
   2125 			max_type = PCI_INTR_TYPE_MSI;
   2126 			counts[PCI_INTR_TYPE_MSI] = 1;
   2127 			counts[PCI_INTR_TYPE_INTX] = 1;
   2128 			goto alloc_retry;
   2129 		}
   2130 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2131 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2132 		error = wm_setup_legacy(sc);
   2133 		if (error) {
   2134 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2135 			    counts[PCI_INTR_TYPE_MSI]);
   2136 
   2137 			/* The next try is for INTx: Disable MSI */
   2138 			max_type = PCI_INTR_TYPE_INTX;
   2139 			counts[PCI_INTR_TYPE_INTX] = 1;
   2140 			goto alloc_retry;
   2141 		}
   2142 	} else {
   2143 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2144 		error = wm_setup_legacy(sc);
   2145 		if (error) {
   2146 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2147 			    counts[PCI_INTR_TYPE_INTX]);
   2148 			return;
   2149 		}
   2150 	}
   2151 
   2152 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2153 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2154 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2155 	    WM_WORKQUEUE_FLAGS);
   2156 	if (error) {
   2157 		aprint_error_dev(sc->sc_dev,
   2158 		    "unable to create workqueue\n");
   2159 		goto out;
   2160 	}
   2161 
   2162 	/*
   2163 	 * Check the function ID (unit number of the chip).
   2164 	 */
   2165 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2166 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2167 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2168 	    || (sc->sc_type == WM_T_82580)
   2169 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2170 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2171 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2172 	else
   2173 		sc->sc_funcid = 0;
   2174 
   2175 	/*
   2176 	 * Determine a few things about the bus we're connected to.
   2177 	 */
   2178 	if (sc->sc_type < WM_T_82543) {
   2179 		/* We don't really know the bus characteristics here. */
   2180 		sc->sc_bus_speed = 33;
   2181 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2182 		/*
    2183 		 * CSA (Communication Streaming Architecture) is about as fast
    2184 		 * as a 32-bit 66MHz PCI bus.
   2185 		 */
   2186 		sc->sc_flags |= WM_F_CSA;
   2187 		sc->sc_bus_speed = 66;
   2188 		aprint_verbose_dev(sc->sc_dev,
   2189 		    "Communication Streaming Architecture\n");
   2190 		if (sc->sc_type == WM_T_82547) {
   2191 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
   2192 			callout_setfunc(&sc->sc_txfifo_ch,
   2193 			    wm_82547_txfifo_stall, sc);
   2194 			aprint_verbose_dev(sc->sc_dev,
   2195 			    "using 82547 Tx FIFO stall work-around\n");
   2196 		}
   2197 	} else if (sc->sc_type >= WM_T_82571) {
   2198 		sc->sc_flags |= WM_F_PCIE;
   2199 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2200 		    && (sc->sc_type != WM_T_ICH10)
   2201 		    && (sc->sc_type != WM_T_PCH)
   2202 		    && (sc->sc_type != WM_T_PCH2)
   2203 		    && (sc->sc_type != WM_T_PCH_LPT)
   2204 		    && (sc->sc_type != WM_T_PCH_SPT)
   2205 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2206 			/* ICH* and PCH* have no PCIe capability registers */
   2207 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2208 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2209 				NULL) == 0)
   2210 				aprint_error_dev(sc->sc_dev,
   2211 				    "unable to find PCIe capability\n");
   2212 		}
   2213 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2214 	} else {
   2215 		reg = CSR_READ(sc, WMREG_STATUS);
   2216 		if (reg & STATUS_BUS64)
   2217 			sc->sc_flags |= WM_F_BUS64;
   2218 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2219 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2220 
   2221 			sc->sc_flags |= WM_F_PCIX;
   2222 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2223 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2224 				aprint_error_dev(sc->sc_dev,
   2225 				    "unable to find PCIX capability\n");
   2226 			else if (sc->sc_type != WM_T_82545_3 &&
   2227 				 sc->sc_type != WM_T_82546_3) {
   2228 				/*
   2229 				 * Work around a problem caused by the BIOS
   2230 				 * setting the max memory read byte count
   2231 				 * incorrectly.
   2232 				 */
   2233 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2234 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2235 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2236 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2237 
   2238 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2239 				    PCIX_CMD_BYTECNT_SHIFT;
   2240 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2241 				    PCIX_STATUS_MAXB_SHIFT;
   2242 				if (bytecnt > maxb) {
   2243 					aprint_verbose_dev(sc->sc_dev,
   2244 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2245 					    512 << bytecnt, 512 << maxb);
   2246 					pcix_cmd = (pcix_cmd &
   2247 					    ~PCIX_CMD_BYTECNT_MASK) |
   2248 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2249 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2250 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2251 					    pcix_cmd);
   2252 				}
   2253 			}
   2254 		}
   2255 		/*
   2256 		 * The quad port adapter is special; it has a PCIX-PCIX
   2257 		 * bridge on the board, and can run the secondary bus at
   2258 		 * a higher speed.
   2259 		 */
   2260 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2261 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2262 								      : 66;
   2263 		} else if (sc->sc_flags & WM_F_PCIX) {
   2264 			switch (reg & STATUS_PCIXSPD_MASK) {
   2265 			case STATUS_PCIXSPD_50_66:
   2266 				sc->sc_bus_speed = 66;
   2267 				break;
   2268 			case STATUS_PCIXSPD_66_100:
   2269 				sc->sc_bus_speed = 100;
   2270 				break;
   2271 			case STATUS_PCIXSPD_100_133:
   2272 				sc->sc_bus_speed = 133;
   2273 				break;
   2274 			default:
   2275 				aprint_error_dev(sc->sc_dev,
   2276 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2277 				    reg & STATUS_PCIXSPD_MASK);
   2278 				sc->sc_bus_speed = 66;
   2279 				break;
   2280 			}
   2281 		} else
   2282 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2283 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2284 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2285 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2286 	}
   2287 
   2288 	/* clear interesting stat counters */
   2289 	CSR_READ(sc, WMREG_COLC);
   2290 	CSR_READ(sc, WMREG_RXERRC);
   2291 
   2292 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2293 	    || (sc->sc_type >= WM_T_ICH8))
   2294 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2295 	if (sc->sc_type >= WM_T_ICH8)
   2296 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2297 
   2298 	/* Set PHY, NVM mutex related stuff */
   2299 	switch (sc->sc_type) {
   2300 	case WM_T_82542_2_0:
   2301 	case WM_T_82542_2_1:
   2302 	case WM_T_82543:
   2303 	case WM_T_82544:
   2304 		/* Microwire */
   2305 		sc->nvm.read = wm_nvm_read_uwire;
   2306 		sc->sc_nvm_wordsize = 64;
   2307 		sc->sc_nvm_addrbits = 6;
   2308 		break;
   2309 	case WM_T_82540:
   2310 	case WM_T_82545:
   2311 	case WM_T_82545_3:
   2312 	case WM_T_82546:
   2313 	case WM_T_82546_3:
   2314 		/* Microwire */
   2315 		sc->nvm.read = wm_nvm_read_uwire;
   2316 		reg = CSR_READ(sc, WMREG_EECD);
   2317 		if (reg & EECD_EE_SIZE) {
   2318 			sc->sc_nvm_wordsize = 256;
   2319 			sc->sc_nvm_addrbits = 8;
   2320 		} else {
   2321 			sc->sc_nvm_wordsize = 64;
   2322 			sc->sc_nvm_addrbits = 6;
   2323 		}
   2324 		sc->sc_flags |= WM_F_LOCK_EECD;
   2325 		sc->nvm.acquire = wm_get_eecd;
   2326 		sc->nvm.release = wm_put_eecd;
   2327 		break;
   2328 	case WM_T_82541:
   2329 	case WM_T_82541_2:
   2330 	case WM_T_82547:
   2331 	case WM_T_82547_2:
   2332 		reg = CSR_READ(sc, WMREG_EECD);
   2333 		/*
    2334 		 * wm_nvm_set_addrbits_size_eecd() itself accesses SPI only on
    2335 		 * the 8254[17], so set the flags and functions before calling it.
   2336 		 */
   2337 		sc->sc_flags |= WM_F_LOCK_EECD;
   2338 		sc->nvm.acquire = wm_get_eecd;
   2339 		sc->nvm.release = wm_put_eecd;
   2340 		if (reg & EECD_EE_TYPE) {
   2341 			/* SPI */
   2342 			sc->nvm.read = wm_nvm_read_spi;
   2343 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2344 			wm_nvm_set_addrbits_size_eecd(sc);
   2345 		} else {
   2346 			/* Microwire */
   2347 			sc->nvm.read = wm_nvm_read_uwire;
   2348 			if ((reg & EECD_EE_ABITS) != 0) {
   2349 				sc->sc_nvm_wordsize = 256;
   2350 				sc->sc_nvm_addrbits = 8;
   2351 			} else {
   2352 				sc->sc_nvm_wordsize = 64;
   2353 				sc->sc_nvm_addrbits = 6;
   2354 			}
   2355 		}
   2356 		break;
   2357 	case WM_T_82571:
   2358 	case WM_T_82572:
   2359 		/* SPI */
   2360 		sc->nvm.read = wm_nvm_read_eerd;
    2361 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2362 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2363 		wm_nvm_set_addrbits_size_eecd(sc);
   2364 		sc->phy.acquire = wm_get_swsm_semaphore;
   2365 		sc->phy.release = wm_put_swsm_semaphore;
   2366 		sc->nvm.acquire = wm_get_nvm_82571;
   2367 		sc->nvm.release = wm_put_nvm_82571;
   2368 		break;
   2369 	case WM_T_82573:
   2370 	case WM_T_82574:
   2371 	case WM_T_82583:
   2372 		sc->nvm.read = wm_nvm_read_eerd;
    2373 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2374 		if (sc->sc_type == WM_T_82573) {
   2375 			sc->phy.acquire = wm_get_swsm_semaphore;
   2376 			sc->phy.release = wm_put_swsm_semaphore;
   2377 			sc->nvm.acquire = wm_get_nvm_82571;
   2378 			sc->nvm.release = wm_put_nvm_82571;
   2379 		} else {
   2380 			/* Both PHY and NVM use the same semaphore. */
   2381 			sc->phy.acquire = sc->nvm.acquire
   2382 			    = wm_get_swfwhw_semaphore;
   2383 			sc->phy.release = sc->nvm.release
   2384 			    = wm_put_swfwhw_semaphore;
   2385 		}
   2386 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2387 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2388 			sc->sc_nvm_wordsize = 2048;
   2389 		} else {
   2390 			/* SPI */
   2391 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2392 			wm_nvm_set_addrbits_size_eecd(sc);
   2393 		}
   2394 		break;
   2395 	case WM_T_82575:
   2396 	case WM_T_82576:
   2397 	case WM_T_82580:
   2398 	case WM_T_I350:
   2399 	case WM_T_I354:
   2400 	case WM_T_80003:
   2401 		/* SPI */
   2402 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2403 		wm_nvm_set_addrbits_size_eecd(sc);
   2404 		if ((sc->sc_type == WM_T_80003)
   2405 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2406 			sc->nvm.read = wm_nvm_read_eerd;
   2407 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2408 		} else {
   2409 			sc->nvm.read = wm_nvm_read_spi;
   2410 			sc->sc_flags |= WM_F_LOCK_EECD;
   2411 		}
   2412 		sc->phy.acquire = wm_get_phy_82575;
   2413 		sc->phy.release = wm_put_phy_82575;
   2414 		sc->nvm.acquire = wm_get_nvm_80003;
   2415 		sc->nvm.release = wm_put_nvm_80003;
   2416 		break;
   2417 	case WM_T_ICH8:
   2418 	case WM_T_ICH9:
   2419 	case WM_T_ICH10:
   2420 	case WM_T_PCH:
   2421 	case WM_T_PCH2:
   2422 	case WM_T_PCH_LPT:
   2423 		sc->nvm.read = wm_nvm_read_ich8;
   2424 		/* FLASH */
   2425 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2426 		sc->sc_nvm_wordsize = 2048;
   2427 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2428 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2429 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2430 			aprint_error_dev(sc->sc_dev,
   2431 			    "can't map FLASH registers\n");
   2432 			goto out;
   2433 		}
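         		/*
         		 * GFPREG describes the NVM region of the flash: the low
         		 * field holds the first sector and the field at bit 16
         		 * the last.  Convert sectors to bytes for the base, then
         		 * derive the per-bank size; the region holds two banks
         		 * of 16-bit words, hence the divide by 2 * sizeof(uint16_t).
         		 */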
   2434 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2435 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2436 		    ICH_FLASH_SECTOR_SIZE;
   2437 		sc->sc_ich8_flash_bank_size =
   2438 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2439 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2440 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2441 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2442 		sc->sc_flashreg_offset = 0;
   2443 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2444 		sc->phy.release = wm_put_swflag_ich8lan;
   2445 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2446 		sc->nvm.release = wm_put_nvm_ich8lan;
   2447 		break;
   2448 	case WM_T_PCH_SPT:
   2449 	case WM_T_PCH_CNP:
   2450 		sc->nvm.read = wm_nvm_read_spt;
   2451 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2452 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2453 		sc->sc_flasht = sc->sc_st;
   2454 		sc->sc_flashh = sc->sc_sh;
   2455 		sc->sc_ich8_flash_base = 0;
   2456 		sc->sc_nvm_wordsize =
   2457 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2458 		    * NVM_SIZE_MULTIPLIER;
    2459 		/* That is the size in bytes; we want it in words */
   2460 		sc->sc_nvm_wordsize /= 2;
   2461 		/* Assume 2 banks */
   2462 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2463 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2464 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2465 		sc->phy.release = wm_put_swflag_ich8lan;
   2466 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2467 		sc->nvm.release = wm_put_nvm_ich8lan;
   2468 		break;
   2469 	case WM_T_I210:
   2470 	case WM_T_I211:
    2471 	/* Allow a single clear of the SW semaphore on I210 and newer */
   2472 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2473 		if (wm_nvm_flash_presence_i210(sc)) {
   2474 			sc->nvm.read = wm_nvm_read_eerd;
   2475 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2476 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2477 			wm_nvm_set_addrbits_size_eecd(sc);
   2478 		} else {
   2479 			sc->nvm.read = wm_nvm_read_invm;
   2480 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2481 			sc->sc_nvm_wordsize = INVM_SIZE;
   2482 		}
   2483 		sc->phy.acquire = wm_get_phy_82575;
   2484 		sc->phy.release = wm_put_phy_82575;
   2485 		sc->nvm.acquire = wm_get_nvm_80003;
   2486 		sc->nvm.release = wm_put_nvm_80003;
   2487 		break;
   2488 	default:
   2489 		break;
   2490 	}
   2491 
   2492 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2493 	switch (sc->sc_type) {
   2494 	case WM_T_82571:
   2495 	case WM_T_82572:
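         		/*
         		 * On the 82571/82572, take the SWSM2 lock so that only
         		 * the first function to attach performs the clear.
         		 */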
   2496 		reg = CSR_READ(sc, WMREG_SWSM2);
   2497 		if ((reg & SWSM2_LOCK) == 0) {
   2498 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2499 			force_clear_smbi = true;
   2500 		} else
   2501 			force_clear_smbi = false;
   2502 		break;
   2503 	case WM_T_82573:
   2504 	case WM_T_82574:
   2505 	case WM_T_82583:
   2506 		force_clear_smbi = true;
   2507 		break;
   2508 	default:
   2509 		force_clear_smbi = false;
   2510 		break;
   2511 	}
   2512 	if (force_clear_smbi) {
   2513 		reg = CSR_READ(sc, WMREG_SWSM);
   2514 		if ((reg & SWSM_SMBI) != 0)
   2515 			aprint_error_dev(sc->sc_dev,
   2516 			    "Please update the Bootagent\n");
   2517 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2518 	}
   2519 
   2520 	/*
    2521 	 * Defer printing the EEPROM type until after verifying the checksum.
   2522 	 * This allows the EEPROM type to be printed correctly in the case
   2523 	 * that no EEPROM is attached.
   2524 	 */
   2525 	/*
   2526 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2527 	 * this for later, so we can fail future reads from the EEPROM.
   2528 	 */
   2529 	if (wm_nvm_validate_checksum(sc)) {
   2530 		/*
    2531 		 * Validate the checksum again, because some PCIe parts fail
    2532 		 * the first check due to the link being in a sleep state.
   2533 		 */
   2534 		if (wm_nvm_validate_checksum(sc))
   2535 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2536 	}
   2537 
   2538 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2539 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2540 	else {
   2541 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2542 		    sc->sc_nvm_wordsize);
   2543 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2544 			aprint_verbose("iNVM");
   2545 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2546 			aprint_verbose("FLASH(HW)");
   2547 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2548 			aprint_verbose("FLASH");
   2549 		else {
   2550 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2551 				eetype = "SPI";
   2552 			else
   2553 				eetype = "MicroWire";
   2554 			aprint_verbose("(%d address bits) %s EEPROM",
   2555 			    sc->sc_nvm_addrbits, eetype);
   2556 		}
   2557 	}
   2558 	wm_nvm_version(sc);
   2559 	aprint_verbose("\n");
   2560 
   2561 	/*
    2562 	 * XXX This is the first call of wm_gmii_setup_phytype(); the
    2563 	 * result might be incorrect.
   2564 	 */
   2565 	wm_gmii_setup_phytype(sc, 0, 0);
   2566 
   2567 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2568 	switch (sc->sc_type) {
   2569 	case WM_T_ICH8:
   2570 	case WM_T_ICH9:
   2571 	case WM_T_ICH10:
   2572 	case WM_T_PCH:
   2573 	case WM_T_PCH2:
   2574 	case WM_T_PCH_LPT:
   2575 	case WM_T_PCH_SPT:
   2576 	case WM_T_PCH_CNP:
   2577 		apme_mask = WUC_APME;
   2578 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2579 		if ((eeprom_data & apme_mask) != 0)
   2580 			sc->sc_flags |= WM_F_WOL;
   2581 		break;
   2582 	default:
   2583 		break;
   2584 	}
   2585 
   2586 	/* Reset the chip to a known state. */
   2587 	wm_reset(sc);
   2588 
   2589 	/*
   2590 	 * Check for I21[01] PLL workaround.
   2591 	 *
   2592 	 * Three cases:
   2593 	 * a) Chip is I211.
   2594 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2595 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2596 	 */
   2597 	if (sc->sc_type == WM_T_I211)
   2598 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2599 	if (sc->sc_type == WM_T_I210) {
   2600 		if (!wm_nvm_flash_presence_i210(sc))
   2601 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2602 		else if ((sc->sc_nvm_ver_major < 3)
   2603 		    || ((sc->sc_nvm_ver_major == 3)
   2604 			&& (sc->sc_nvm_ver_minor < 25))) {
   2605 			aprint_verbose_dev(sc->sc_dev,
   2606 			    "ROM image version %d.%d is older than 3.25\n",
   2607 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2608 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2609 		}
   2610 	}
   2611 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2612 		wm_pll_workaround_i210(sc);
   2613 
   2614 	wm_get_wakeup(sc);
   2615 
   2616 	/* Non-AMT based hardware can now take control from firmware */
   2617 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2618 		wm_get_hw_control(sc);
   2619 
   2620 	/*
   2621 	 * Read the Ethernet address from the EEPROM, if not first found
   2622 	 * in device properties.
   2623 	 */
   2624 	ea = prop_dictionary_get(dict, "mac-address");
   2625 	if (ea != NULL) {
   2626 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2627 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2628 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
   2629 	} else {
   2630 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2631 			aprint_error_dev(sc->sc_dev,
   2632 			    "unable to read Ethernet address\n");
   2633 			goto out;
   2634 		}
   2635 	}
   2636 
   2637 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2638 	    ether_sprintf(enaddr));
   2639 
   2640 	/*
   2641 	 * Read the config info from the EEPROM, and set up various
   2642 	 * bits in the control registers based on their contents.
   2643 	 */
   2644 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2645 	if (pn != NULL) {
   2646 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2647 		cfg1 = (uint16_t) prop_number_signed_value(pn);
   2648 	} else {
   2649 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2650 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2651 			goto out;
   2652 		}
   2653 	}
   2654 
   2655 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2656 	if (pn != NULL) {
   2657 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2658 		cfg2 = (uint16_t) prop_number_signed_value(pn);
   2659 	} else {
   2660 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2661 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2662 			goto out;
   2663 		}
   2664 	}
   2665 
   2666 	/* check for WM_F_WOL */
   2667 	switch (sc->sc_type) {
   2668 	case WM_T_82542_2_0:
   2669 	case WM_T_82542_2_1:
   2670 	case WM_T_82543:
   2671 		/* dummy? */
   2672 		eeprom_data = 0;
   2673 		apme_mask = NVM_CFG3_APME;
   2674 		break;
   2675 	case WM_T_82544:
   2676 		apme_mask = NVM_CFG2_82544_APM_EN;
   2677 		eeprom_data = cfg2;
   2678 		break;
   2679 	case WM_T_82546:
   2680 	case WM_T_82546_3:
   2681 	case WM_T_82571:
   2682 	case WM_T_82572:
   2683 	case WM_T_82573:
   2684 	case WM_T_82574:
   2685 	case WM_T_82583:
   2686 	case WM_T_80003:
   2687 	case WM_T_82575:
   2688 	case WM_T_82576:
   2689 		apme_mask = NVM_CFG3_APME;
   2690 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2691 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2692 		break;
   2693 	case WM_T_82580:
   2694 	case WM_T_I350:
   2695 	case WM_T_I354:
   2696 	case WM_T_I210:
   2697 	case WM_T_I211:
   2698 		apme_mask = NVM_CFG3_APME;
   2699 		wm_nvm_read(sc,
   2700 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2701 		    1, &eeprom_data);
   2702 		break;
   2703 	case WM_T_ICH8:
   2704 	case WM_T_ICH9:
   2705 	case WM_T_ICH10:
   2706 	case WM_T_PCH:
   2707 	case WM_T_PCH2:
   2708 	case WM_T_PCH_LPT:
   2709 	case WM_T_PCH_SPT:
   2710 	case WM_T_PCH_CNP:
    2711 		/* Already checked before wm_reset() */
   2712 		apme_mask = eeprom_data = 0;
   2713 		break;
   2714 	default: /* XXX 82540 */
   2715 		apme_mask = NVM_CFG3_APME;
   2716 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2717 		break;
   2718 	}
    2719 	/* Set the WM_F_WOL flag based on the EEPROM settings read above */
   2720 	if ((eeprom_data & apme_mask) != 0)
   2721 		sc->sc_flags |= WM_F_WOL;
   2722 
   2723 	/*
    2724 	 * We have the EEPROM settings; now apply the special cases
    2725 	 * where the EEPROM may be wrong or the board won't support
    2726 	 * wake-on-LAN on a particular port.
   2727 	 */
   2728 	switch (sc->sc_pcidevid) {
   2729 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2730 		sc->sc_flags &= ~WM_F_WOL;
   2731 		break;
   2732 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2733 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2734 		/* Wake events only supported on port A for dual fiber
   2735 		 * regardless of eeprom setting */
   2736 		if (sc->sc_funcid == 1)
   2737 			sc->sc_flags &= ~WM_F_WOL;
   2738 		break;
   2739 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2740 		/* If quad port adapter, disable WoL on all but port A */
   2741 		if (sc->sc_funcid != 0)
   2742 			sc->sc_flags &= ~WM_F_WOL;
   2743 		break;
   2744 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2745 		/* Wake events only supported on port A for dual fiber
   2746 		 * regardless of eeprom setting */
   2747 		if (sc->sc_funcid == 1)
   2748 			sc->sc_flags &= ~WM_F_WOL;
   2749 		break;
   2750 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2751 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2752 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2753 		/* If quad port adapter, disable WoL on all but port A */
   2754 		if (sc->sc_funcid != 0)
   2755 			sc->sc_flags &= ~WM_F_WOL;
   2756 		break;
   2757 	}
   2758 
   2759 	if (sc->sc_type >= WM_T_82575) {
   2760 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2761 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2762 			    nvmword);
   2763 			if ((sc->sc_type == WM_T_82575) ||
   2764 			    (sc->sc_type == WM_T_82576)) {
   2765 				/* Check NVM for autonegotiation */
   2766 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2767 				    != 0)
   2768 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2769 			}
   2770 			if ((sc->sc_type == WM_T_82575) ||
   2771 			    (sc->sc_type == WM_T_I350)) {
   2772 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2773 					sc->sc_flags |= WM_F_MAS;
   2774 			}
   2775 		}
   2776 	}
   2777 
   2778 	/*
    2779 	 * XXX need special handling for some multiple-port cards
    2780 	 * to disable a particular port.
   2781 	 */
   2782 
   2783 	if (sc->sc_type >= WM_T_82544) {
   2784 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2785 		if (pn != NULL) {
   2786 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2787 			swdpin = (uint16_t) prop_number_signed_value(pn);
   2788 		} else {
   2789 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2790 				aprint_error_dev(sc->sc_dev,
   2791 				    "unable to read SWDPIN\n");
   2792 				goto out;
   2793 			}
   2794 		}
   2795 	}
   2796 
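         	/* CTRL_ILOS inverts the loss-of-signal (LOS) input. */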
   2797 	if (cfg1 & NVM_CFG1_ILOS)
   2798 		sc->sc_ctrl |= CTRL_ILOS;
   2799 
   2800 	/*
   2801 	 * XXX
    2802 	 * This code isn't correct because pins 2 and 3 are located
    2803 	 * at different positions on newer chips. Check all datasheets.
    2804 	 *
    2805 	 * Until this is resolved, only apply it to chips <= 82580.
   2806 	 */
   2807 	if (sc->sc_type <= WM_T_82580) {
   2808 		if (sc->sc_type >= WM_T_82544) {
   2809 			sc->sc_ctrl |=
   2810 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2811 			    CTRL_SWDPIO_SHIFT;
   2812 			sc->sc_ctrl |=
   2813 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2814 			    CTRL_SWDPINS_SHIFT;
   2815 		} else {
   2816 			sc->sc_ctrl |=
   2817 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2818 			    CTRL_SWDPIO_SHIFT;
   2819 		}
   2820 	}
   2821 
   2822 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2823 		wm_nvm_read(sc,
   2824 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2825 		    1, &nvmword);
   2826 		if (nvmword & NVM_CFG3_ILOS)
   2827 			sc->sc_ctrl |= CTRL_ILOS;
   2828 	}
   2829 
   2830 #if 0
   2831 	if (sc->sc_type >= WM_T_82544) {
   2832 		if (cfg1 & NVM_CFG1_IPS0)
   2833 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2834 		if (cfg1 & NVM_CFG1_IPS1)
   2835 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2836 		sc->sc_ctrl_ext |=
   2837 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2838 		    CTRL_EXT_SWDPIO_SHIFT;
   2839 		sc->sc_ctrl_ext |=
   2840 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2841 		    CTRL_EXT_SWDPINS_SHIFT;
   2842 	} else {
   2843 		sc->sc_ctrl_ext |=
   2844 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2845 		    CTRL_EXT_SWDPIO_SHIFT;
   2846 	}
   2847 #endif
   2848 
   2849 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2850 #if 0
   2851 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2852 #endif
   2853 
   2854 	if (sc->sc_type == WM_T_PCH) {
   2855 		uint16_t val;
   2856 
   2857 		/* Save the NVM K1 bit setting */
   2858 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2859 
   2860 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2861 			sc->sc_nvm_k1_enabled = 1;
   2862 		else
   2863 			sc->sc_nvm_k1_enabled = 0;
   2864 	}
   2865 
    2866 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2867 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2868 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2869 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2870 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2871 	    || sc->sc_type == WM_T_82573
   2872 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2873 		/* Copper only */
   2874 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2875 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2876 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2877 	    || (sc->sc_type == WM_T_I211)) {
   2878 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2879 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2880 		switch (link_mode) {
   2881 		case CTRL_EXT_LINK_MODE_1000KX:
   2882 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2883 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2884 			break;
   2885 		case CTRL_EXT_LINK_MODE_SGMII:
   2886 			if (wm_sgmii_uses_mdio(sc)) {
   2887 				aprint_normal_dev(sc->sc_dev,
   2888 				    "SGMII(MDIO)\n");
   2889 				sc->sc_flags |= WM_F_SGMII;
   2890 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2891 				break;
   2892 			}
   2893 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2894 			/*FALLTHROUGH*/
   2895 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2896 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2897 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2898 				if (link_mode
   2899 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2900 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2901 					sc->sc_flags |= WM_F_SGMII;
   2902 					aprint_verbose_dev(sc->sc_dev,
   2903 					    "SGMII\n");
   2904 				} else {
   2905 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2906 					aprint_verbose_dev(sc->sc_dev,
   2907 					    "SERDES\n");
   2908 				}
   2909 				break;
   2910 			}
   2911 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2912 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2913 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2914 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2915 				sc->sc_flags |= WM_F_SGMII;
   2916 			}
   2917 			/* Do not change link mode for 100BaseFX */
   2918 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2919 				break;
   2920 
   2921 			/* Change current link mode setting */
   2922 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2923 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2924 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2925 			else
   2926 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2927 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2928 			break;
   2929 		case CTRL_EXT_LINK_MODE_GMII:
   2930 		default:
   2931 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2932 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2933 			break;
   2934 		}
   2935 
    2936 		/* Enable the I2C bus only in SGMII mode. */
    2937 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2938 			reg |= CTRL_EXT_I2C_ENA;
    2939 		else
    2940 			reg &= ~CTRL_EXT_I2C_ENA;
   2941 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2942 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2943 			if (!wm_sgmii_uses_mdio(sc))
   2944 				wm_gmii_setup_phytype(sc, 0, 0);
   2945 			wm_reset_mdicnfg_82580(sc);
   2946 		}
   2947 	} else if (sc->sc_type < WM_T_82543 ||
   2948 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2949 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2950 			aprint_error_dev(sc->sc_dev,
   2951 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2952 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2953 		}
   2954 	} else {
   2955 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2956 			aprint_error_dev(sc->sc_dev,
   2957 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2958 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2959 		}
   2960 	}
   2961 
   2962 	if (sc->sc_type >= WM_T_PCH2)
   2963 		sc->sc_flags |= WM_F_EEE;
   2964 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2965 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2966 		/* XXX: Need special handling for I354. (not yet) */
   2967 		if (sc->sc_type != WM_T_I354)
   2968 			sc->sc_flags |= WM_F_EEE;
   2969 	}
   2970 
   2971 	/*
   2972 	 * The I350 has a bug where it always strips the CRC whether
    2973 	 * asked to or not, so ask for stripped CRC here and cope with it in rxeof.
   2974 	 */
   2975 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   2976 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   2977 		sc->sc_flags |= WM_F_CRC_STRIP;
   2978 
   2979 	/* Set device properties (macflags) */
   2980 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2981 
   2982 	if (sc->sc_flags != 0) {
   2983 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2984 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2985 	}
   2986 
   2987 #ifdef WM_MPSAFE
   2988 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2989 #else
   2990 	sc->sc_core_lock = NULL;
   2991 #endif
   2992 
   2993 	/* Initialize the media structures accordingly. */
   2994 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2995 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2996 	else
   2997 		wm_tbi_mediainit(sc); /* All others */
   2998 
   2999 	ifp = &sc->sc_ethercom.ec_if;
   3000 	xname = device_xname(sc->sc_dev);
   3001 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   3002 	ifp->if_softc = sc;
   3003 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3004 #ifdef WM_MPSAFE
   3005 	ifp->if_extflags = IFEF_MPSAFE;
   3006 #endif
   3007 	ifp->if_ioctl = wm_ioctl;
   3008 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3009 		ifp->if_start = wm_nq_start;
   3010 		/*
   3011 		 * When the number of CPUs is one and the controller can use
    3012 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    3013 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    3014 		 * the other for link status changes.
   3015 		 * In this situation, wm_nq_transmit() is disadvantageous
   3016 		 * because of wm_select_txqueue() and pcq(9) overhead.
   3017 		 */
   3018 		if (wm_is_using_multiqueue(sc))
   3019 			ifp->if_transmit = wm_nq_transmit;
   3020 	} else {
   3021 		ifp->if_start = wm_start;
   3022 		/*
   3023 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
   3024 		 * described above.
   3025 		 */
   3026 		if (wm_is_using_multiqueue(sc))
   3027 			ifp->if_transmit = wm_transmit;
   3028 	}
    3029 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   3030 	ifp->if_init = wm_init;
   3031 	ifp->if_stop = wm_stop;
   3032 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   3033 	IFQ_SET_READY(&ifp->if_snd);
   3034 
   3035 	/* Check for jumbo frame */
   3036 	switch (sc->sc_type) {
   3037 	case WM_T_82573:
   3038 		/* XXX limited to 9234 if ASPM is disabled */
   3039 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   3040 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   3041 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3042 		break;
   3043 	case WM_T_82571:
   3044 	case WM_T_82572:
   3045 	case WM_T_82574:
   3046 	case WM_T_82583:
   3047 	case WM_T_82575:
   3048 	case WM_T_82576:
   3049 	case WM_T_82580:
   3050 	case WM_T_I350:
   3051 	case WM_T_I354:
   3052 	case WM_T_I210:
   3053 	case WM_T_I211:
   3054 	case WM_T_80003:
   3055 	case WM_T_ICH9:
   3056 	case WM_T_ICH10:
   3057 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   3058 	case WM_T_PCH_LPT:
   3059 	case WM_T_PCH_SPT:
   3060 	case WM_T_PCH_CNP:
   3061 		/* XXX limited to 9234 */
   3062 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3063 		break;
   3064 	case WM_T_PCH:
   3065 		/* XXX limited to 4096 */
   3066 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3067 		break;
   3068 	case WM_T_82542_2_0:
   3069 	case WM_T_82542_2_1:
   3070 	case WM_T_ICH8:
   3071 		/* No support for jumbo frame */
   3072 		break;
   3073 	default:
   3074 		/* ETHER_MAX_LEN_JUMBO */
   3075 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3076 		break;
   3077 	}
   3078 
    3079 	/* If we're an i82543 or greater, we can support VLANs. */
   3080 	if (sc->sc_type >= WM_T_82543) {
   3081 		sc->sc_ethercom.ec_capabilities |=
   3082 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   3083 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   3084 	}
   3085 
   3086 	if ((sc->sc_flags & WM_F_EEE) != 0)
   3087 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   3088 
   3089 	/*
   3090 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   3091 	 * on i82543 and later.
   3092 	 */
   3093 	if (sc->sc_type >= WM_T_82543) {
   3094 		ifp->if_capabilities |=
   3095 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   3096 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3097 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   3098 		    IFCAP_CSUM_TCPv6_Tx |
   3099 		    IFCAP_CSUM_UDPv6_Tx;
   3100 	}
   3101 
   3102 	/*
    3103 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   3104 	 *
   3105 	 *	82541GI (8086:1076) ... no
   3106 	 *	82572EI (8086:10b9) ... yes
   3107 	 */
   3108 	if (sc->sc_type >= WM_T_82571) {
   3109 		ifp->if_capabilities |=
   3110 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3111 	}
   3112 
   3113 	/*
    3114 	 * If we're an i82544 or greater (except i82547), we can do
   3115 	 * TCP segmentation offload.
   3116 	 */
   3117 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3118 		ifp->if_capabilities |= IFCAP_TSOv4;
   3119 	}
   3120 
   3121 	if (sc->sc_type >= WM_T_82571) {
   3122 		ifp->if_capabilities |= IFCAP_TSOv6;
   3123 	}
   3124 
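         	/* Per-call Tx/Rx packet budgets for softint and interrupt context. */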
   3125 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3126 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3127 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3128 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3129 
   3130 	/* Attach the interface. */
   3131 	if_initialize(ifp);
   3132 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3133 	ether_ifattach(ifp, enaddr);
   3134 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3135 	if_register(ifp);
   3136 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   3137 	    RND_FLAG_DEFAULT);
   3138 
   3139 #ifdef WM_EVENT_COUNTERS
   3140 	/* Attach event counters. */
   3141 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3142 	    NULL, xname, "linkintr");
   3143 
   3144 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3145 	    NULL, xname, "tx_xoff");
   3146 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3147 	    NULL, xname, "tx_xon");
   3148 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3149 	    NULL, xname, "rx_xoff");
   3150 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3151 	    NULL, xname, "rx_xon");
   3152 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3153 	    NULL, xname, "rx_macctl");
   3154 #endif /* WM_EVENT_COUNTERS */
   3155 
   3156 	sc->sc_txrx_use_workqueue = false;
   3157 
   3158 	if (wm_phy_need_linkdown_discard(sc)) {
   3159 		DPRINTF(sc, WM_DEBUG_LINK,
   3160 		    ("%s: %s: Set linkdown discard flag\n",
   3161 			device_xname(sc->sc_dev), __func__));
   3162 		wm_set_linkdown_discard(sc);
   3163 	}
   3164 
   3165 	wm_init_sysctls(sc);
   3166 
   3167 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3168 		pmf_class_network_register(self, ifp);
   3169 	else
   3170 		aprint_error_dev(self, "couldn't establish power handler\n");
   3171 
   3172 	sc->sc_flags |= WM_F_ATTACHED;
   3173 out:
   3174 	return;
   3175 }
   3176 
   3177 /* The detach function (ca_detach) */
   3178 static int
   3179 wm_detach(device_t self, int flags __unused)
   3180 {
   3181 	struct wm_softc *sc = device_private(self);
   3182 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3183 	int i;
   3184 
   3185 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3186 		return 0;
   3187 
    3188 	/* Stop the interface; callouts are stopped inside wm_stop(). */
   3189 	wm_stop(ifp, 1);
   3190 
   3191 	pmf_device_deregister(self);
   3192 
   3193 	sysctl_teardown(&sc->sc_sysctllog);
   3194 
   3195 #ifdef WM_EVENT_COUNTERS
   3196 	evcnt_detach(&sc->sc_ev_linkintr);
   3197 
   3198 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3199 	evcnt_detach(&sc->sc_ev_tx_xon);
   3200 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3201 	evcnt_detach(&sc->sc_ev_rx_xon);
   3202 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3203 #endif /* WM_EVENT_COUNTERS */
   3204 
   3205 	rnd_detach_source(&sc->rnd_source);
   3206 
   3207 	/* Tell the firmware about the release */
   3208 	WM_CORE_LOCK(sc);
   3209 	wm_release_manageability(sc);
   3210 	wm_release_hw_control(sc);
   3211 	wm_enable_wakeup(sc);
   3212 	WM_CORE_UNLOCK(sc);
   3213 
   3214 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3215 
   3216 	ether_ifdetach(ifp);
   3217 	if_detach(ifp);
   3218 	if_percpuq_destroy(sc->sc_ipq);
   3219 
   3220 	/* Delete all remaining media. */
   3221 	ifmedia_fini(&sc->sc_mii.mii_media);
   3222 
   3223 	/* Unload RX dmamaps and free mbufs */
   3224 	for (i = 0; i < sc->sc_nqueues; i++) {
   3225 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3226 		mutex_enter(rxq->rxq_lock);
   3227 		wm_rxdrain(rxq);
   3228 		mutex_exit(rxq->rxq_lock);
   3229 	}
    3230 	/* All rxq locks must be released before the teardown below. */
   3231 
   3232 	/* Disestablish the interrupt handler */
   3233 	for (i = 0; i < sc->sc_nintrs; i++) {
   3234 		if (sc->sc_ihs[i] != NULL) {
   3235 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3236 			sc->sc_ihs[i] = NULL;
   3237 		}
   3238 	}
   3239 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3240 
    3241 	/* wm_stop() ensures the workqueue is stopped. */
   3242 	workqueue_destroy(sc->sc_queue_wq);
   3243 
   3244 	for (i = 0; i < sc->sc_nqueues; i++)
   3245 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3246 
   3247 	wm_free_txrx_queues(sc);
   3248 
   3249 	/* Unmap the registers */
   3250 	if (sc->sc_ss) {
   3251 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3252 		sc->sc_ss = 0;
   3253 	}
   3254 	if (sc->sc_ios) {
   3255 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3256 		sc->sc_ios = 0;
   3257 	}
   3258 	if (sc->sc_flashs) {
   3259 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3260 		sc->sc_flashs = 0;
   3261 	}
   3262 
   3263 	if (sc->sc_core_lock)
   3264 		mutex_obj_free(sc->sc_core_lock);
   3265 	if (sc->sc_ich_phymtx)
   3266 		mutex_obj_free(sc->sc_ich_phymtx);
   3267 	if (sc->sc_ich_nvmmtx)
   3268 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3269 
   3270 	return 0;
   3271 }
   3272 
   3273 static bool
   3274 wm_suspend(device_t self, const pmf_qual_t *qual)
   3275 {
   3276 	struct wm_softc *sc = device_private(self);
   3277 
   3278 	wm_release_manageability(sc);
   3279 	wm_release_hw_control(sc);
   3280 	wm_enable_wakeup(sc);
   3281 
   3282 	return true;
   3283 }
   3284 
   3285 static bool
   3286 wm_resume(device_t self, const pmf_qual_t *qual)
   3287 {
   3288 	struct wm_softc *sc = device_private(self);
   3289 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3290 	pcireg_t reg;
   3291 	char buf[256];
   3292 
   3293 	reg = CSR_READ(sc, WMREG_WUS);
   3294 	if (reg != 0) {
   3295 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3296 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3297 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3298 	}
   3299 
   3300 	if (sc->sc_type >= WM_T_PCH2)
   3301 		wm_resume_workarounds_pchlan(sc);
   3302 	if ((ifp->if_flags & IFF_UP) == 0) {
   3303 		/* >= PCH_SPT hardware workaround before reset. */
   3304 		if (sc->sc_type >= WM_T_PCH_SPT)
   3305 			wm_flush_desc_rings(sc);
   3306 
   3307 		wm_reset(sc);
   3308 		/* Non-AMT based hardware can now take control from firmware */
   3309 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3310 			wm_get_hw_control(sc);
   3311 		wm_init_manageability(sc);
   3312 	} else {
   3313 		/*
    3314 		 * We called pmf_class_network_register(), so if_init() is
    3315 		 * called automatically when the interface is IFF_UP. wm_reset(),
   3316 		 * wm_get_hw_control() and wm_init_manageability() are called
   3317 		 * via wm_init().
   3318 		 */
   3319 	}
   3320 
   3321 	return true;
   3322 }
   3323 
   3324 /*
   3325  * wm_watchdog:		[ifnet interface function]
   3326  *
   3327  *	Watchdog timer handler.
   3328  */
   3329 static void
   3330 wm_watchdog(struct ifnet *ifp)
   3331 {
   3332 	int qid;
   3333 	struct wm_softc *sc = ifp->if_softc;
    3334 	uint16_t hang_queue = 0; /* Bitmap of hung queues; wm(4) has at most 16 (82576). */
   3335 
   3336 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3337 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3338 
   3339 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3340 	}
   3341 
    3342 	/* If any queue hung up, reset the interface. */
   3343 	if (hang_queue != 0) {
   3344 		(void)wm_init(ifp);
   3345 
   3346 		/*
    3347 		 * Some upper-layer processing (e.g. ALTQ, or a single-CPU
    3348 		 * system) still calls ifp->if_start() directly.
   3349 		 */
   3350 		/* Try to get more packets going. */
   3351 		ifp->if_start(ifp);
   3352 	}
   3353 }
    3354 
   3356 static void
   3357 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3358 {
   3359 
   3360 	mutex_enter(txq->txq_lock);
   3361 	if (txq->txq_sending &&
   3362 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3363 		wm_watchdog_txq_locked(ifp, txq, hang);
   3364 
   3365 	mutex_exit(txq->txq_lock);
   3366 }
   3367 
   3368 static void
   3369 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3370     uint16_t *hang)
   3371 {
   3372 	struct wm_softc *sc = ifp->if_softc;
   3373 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3374 
   3375 	KASSERT(mutex_owned(txq->txq_lock));
   3376 
   3377 	/*
   3378 	 * Since we're using delayed interrupts, sweep up
   3379 	 * before we report an error.
   3380 	 */
   3381 	wm_txeof(txq, UINT_MAX);
   3382 
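         	/* Still marked as sending after the sweep: the queue hung. */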
   3383 	if (txq->txq_sending)
   3384 		*hang |= __BIT(wmq->wmq_id);
   3385 
   3386 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3387 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3388 		    device_xname(sc->sc_dev));
   3389 	} else {
   3390 #ifdef WM_DEBUG
   3391 		int i, j;
   3392 		struct wm_txsoft *txs;
   3393 #endif
   3394 		log(LOG_ERR,
   3395 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3396 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3397 		    txq->txq_next);
   3398 		if_statinc(ifp, if_oerrors);
   3399 #ifdef WM_DEBUG
   3400 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3401 		    i = WM_NEXTTXS(txq, i)) {
   3402 			txs = &txq->txq_soft[i];
   3403 			printf("txs %d tx %d -> %d\n",
   3404 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3405 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3406 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3407 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3408 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3409 					printf("\t %#08x%08x\n",
   3410 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3411 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3412 				} else {
   3413 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3414 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3415 					    txq->txq_descs[j].wtx_addr.wa_low);
   3416 					printf("\t %#04x%02x%02x%08x\n",
   3417 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3418 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3419 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3420 					    txq->txq_descs[j].wtx_cmdlen);
   3421 				}
   3422 				if (j == txs->txs_lastdesc)
   3423 					break;
   3424 			}
   3425 		}
   3426 #endif
   3427 	}
   3428 }
   3429 
   3430 /*
   3431  * wm_tick:
   3432  *
   3433  *	One second timer, used to check link status, sweep up
   3434  *	completed transmit jobs, etc.
   3435  */
   3436 static void
   3437 wm_tick(void *arg)
   3438 {
   3439 	struct wm_softc *sc = arg;
   3440 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3441 #ifndef WM_MPSAFE
   3442 	int s = splnet();
   3443 #endif
   3444 
   3445 	WM_CORE_LOCK(sc);
   3446 
   3447 	if (sc->sc_core_stopping) {
   3448 		WM_CORE_UNLOCK(sc);
   3449 #ifndef WM_MPSAFE
   3450 		splx(s);
   3451 #endif
   3452 		return;
   3453 	}
   3454 
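         	/* Accumulate the flow control (XON/XOFF pause frame) counters. */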
   3455 	if (sc->sc_type >= WM_T_82542_2_1) {
   3456 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3457 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3458 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3459 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3460 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3461 	}
   3462 
   3463 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3464 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3465 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3466 	    + CSR_READ(sc, WMREG_CRCERRS)
   3467 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3468 	    + CSR_READ(sc, WMREG_SYMERRC)
   3469 	    + CSR_READ(sc, WMREG_RXERRC)
   3470 	    + CSR_READ(sc, WMREG_SEC)
   3471 	    + CSR_READ(sc, WMREG_CEXTERR)
   3472 	    + CSR_READ(sc, WMREG_RLEC));
   3473 	/*
    3474 	 * WMREG_RNBC is incremented when no receive buffers are available
    3475 	 * in host memory. It is not a count of dropped packets, because
    3476 	 * the controller can still receive packets in that state as long
    3477 	 * as there is space in the PHY's FIFO.
    3478 	 *
    3479 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT rather
    3480 	 * than if_iqdrops.
   3481 	 */
   3482 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3483 	IF_STAT_PUTREF(ifp);
   3484 
   3485 	if (sc->sc_flags & WM_F_HAS_MII)
   3486 		mii_tick(&sc->sc_mii);
   3487 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3488 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3489 		wm_serdes_tick(sc);
   3490 	else
   3491 		wm_tbi_tick(sc);
   3492 
   3493 	WM_CORE_UNLOCK(sc);
   3494 #ifndef WM_MPSAFE
   3495 	splx(s);
   3496 #endif
   3497 
   3498 	wm_watchdog(ifp);
   3499 
   3500 	callout_schedule(&sc->sc_tick_ch, hz);
   3501 }
   3502 
   3503 static int
   3504 wm_ifflags_cb(struct ethercom *ec)
   3505 {
   3506 	struct ifnet *ifp = &ec->ec_if;
   3507 	struct wm_softc *sc = ifp->if_softc;
   3508 	u_short iffchange;
   3509 	int ecchange;
   3510 	bool needreset = false;
   3511 	int rc = 0;
   3512 
   3513 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3514 		device_xname(sc->sc_dev), __func__));
   3515 
   3516 	WM_CORE_LOCK(sc);
   3517 
   3518 	/*
   3519 	 * Check for if_flags.
   3520 	 * Main usage is to prevent linkdown when opening bpf.
   3521 	 */
   3522 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3523 	sc->sc_if_flags = ifp->if_flags;
   3524 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3525 		needreset = true;
   3526 		goto ec;
   3527 	}
   3528 
   3529 	/* iff related updates */
   3530 	if ((iffchange & IFF_PROMISC) != 0)
   3531 		wm_set_filter(sc);
   3532 
   3533 	wm_set_vlan(sc);
   3534 
   3535 ec:
   3536 	/* Check for ec_capenable. */
   3537 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3538 	sc->sc_ec_capenable = ec->ec_capenable;
   3539 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3540 		needreset = true;
   3541 		goto out;
   3542 	}
   3543 
   3544 	/* ec related updates */
   3545 	wm_set_eee(sc);
   3546 
   3547 out:
   3548 	if (needreset)
   3549 		rc = ENETRESET;
   3550 	WM_CORE_UNLOCK(sc);
   3551 
   3552 	return rc;
   3553 }
   3554 
   3555 static bool
   3556 wm_phy_need_linkdown_discard(struct wm_softc *sc)
   3557 {
   3558 
   3559 	switch (sc->sc_phytype) {
   3560 	case WMPHY_82577: /* ihphy */
   3561 	case WMPHY_82578: /* atphy */
   3562 	case WMPHY_82579: /* ihphy */
   3563 	case WMPHY_I217: /* ihphy */
   3564 	case WMPHY_82580: /* ihphy */
   3565 	case WMPHY_I350: /* ihphy */
   3566 		return true;
   3567 	default:
   3568 		return false;
   3569 	}
   3570 }
   3571 
   3572 static void
   3573 wm_set_linkdown_discard(struct wm_softc *sc)
   3574 {
   3575 
   3576 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3577 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3578 
   3579 		mutex_enter(txq->txq_lock);
   3580 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
   3581 		mutex_exit(txq->txq_lock);
   3582 	}
   3583 }
   3584 
   3585 static void
   3586 wm_clear_linkdown_discard(struct wm_softc *sc)
   3587 {
   3588 
   3589 	for (int i = 0; i < sc->sc_nqueues; i++) {
   3590 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3591 
   3592 		mutex_enter(txq->txq_lock);
   3593 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
   3594 		mutex_exit(txq->txq_lock);
   3595 	}
   3596 }
   3597 
   3598 /*
   3599  * wm_ioctl:		[ifnet interface function]
   3600  *
   3601  *	Handle control requests from the operator.
   3602  */
   3603 static int
   3604 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3605 {
   3606 	struct wm_softc *sc = ifp->if_softc;
   3607 	struct ifreq *ifr = (struct ifreq *)data;
   3608 	struct ifaddr *ifa = (struct ifaddr *)data;
   3609 	struct sockaddr_dl *sdl;
   3610 	int s, error;
   3611 
   3612 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3613 		device_xname(sc->sc_dev), __func__));
   3614 
   3615 #ifndef WM_MPSAFE
   3616 	s = splnet();
   3617 #endif
   3618 	switch (cmd) {
   3619 	case SIOCSIFMEDIA:
   3620 		WM_CORE_LOCK(sc);
   3621 		/* Flow control requires full-duplex mode. */
   3622 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3623 		    (ifr->ifr_media & IFM_FDX) == 0)
   3624 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3625 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3626 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3627 				/* We can do both TXPAUSE and RXPAUSE. */
   3628 				ifr->ifr_media |=
   3629 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3630 			}
   3631 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3632 		}
   3633 		WM_CORE_UNLOCK(sc);
   3634 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3635 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
   3636 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
   3637 				DPRINTF(sc, WM_DEBUG_LINK,
   3638 				    ("%s: %s: Set linkdown discard flag\n",
   3639 					device_xname(sc->sc_dev), __func__));
   3640 				wm_set_linkdown_discard(sc);
   3641 			}
   3642 		}
   3643 		break;
   3644 	case SIOCINITIFADDR:
   3645 		WM_CORE_LOCK(sc);
   3646 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3647 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3648 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3649 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3650 			/* Unicast address is the first multicast entry */
   3651 			wm_set_filter(sc);
   3652 			error = 0;
   3653 			WM_CORE_UNLOCK(sc);
   3654 			break;
   3655 		}
   3656 		WM_CORE_UNLOCK(sc);
   3657 		/*FALLTHROUGH*/
   3658 	default:
   3659 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
   3660 			if (((ifp->if_flags & IFF_UP) != 0) &&
   3661 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
   3662 				DPRINTF(sc, WM_DEBUG_LINK,
   3663 				    ("%s: %s: Set linkdown discard flag\n",
   3664 					device_xname(sc->sc_dev), __func__));
   3665 				wm_set_linkdown_discard(sc);
   3666 			}
   3667 		}
   3668 #ifdef WM_MPSAFE
   3669 		s = splnet();
   3670 #endif
   3671 		/* It may call wm_start, so unlock here */
   3672 		error = ether_ioctl(ifp, cmd, data);
   3673 #ifdef WM_MPSAFE
   3674 		splx(s);
   3675 #endif
   3676 		if (error != ENETRESET)
   3677 			break;
   3678 
   3679 		error = 0;
   3680 
   3681 		if (cmd == SIOCSIFCAP)
   3682 			error = if_init(ifp);
   3683 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3684 			;
   3685 		else if (ifp->if_flags & IFF_RUNNING) {
   3686 			/*
   3687 			 * Multicast list has changed; set the hardware filter
   3688 			 * accordingly.
   3689 			 */
   3690 			WM_CORE_LOCK(sc);
   3691 			wm_set_filter(sc);
   3692 			WM_CORE_UNLOCK(sc);
   3693 		}
   3694 		break;
   3695 	}
   3696 
   3697 #ifndef WM_MPSAFE
   3698 	splx(s);
   3699 #endif
   3700 	return error;
   3701 }
   3702 
   3703 /* MAC address related */
   3704 
   3705 /*
    3706  * Get the offset of the MAC address and return it.
    3707  * If an error occurs, return offset 0.
   3708  */
   3709 static uint16_t
   3710 wm_check_alt_mac_addr(struct wm_softc *sc)
   3711 {
   3712 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3713 	uint16_t offset = NVM_OFF_MACADDR;
   3714 
   3715 	/* Try to read alternative MAC address pointer */
   3716 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3717 		return 0;
   3718 
   3719 	/* Check pointer if it's valid or not. */
   3720 	if ((offset == 0x0000) || (offset == 0xffff))
   3721 		return 0;
   3722 
   3723 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3724 	/*
    3725 	 * Check whether the alternative MAC address is valid.
    3726 	 * Some cards have a non-0xffff pointer but don't actually use
    3727 	 * an alternative MAC address.
   3728 	 *
   3729 	 * Check whether the broadcast bit is set or not.
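         	 * (The I/G bit is bit 0 of the first octet; it must be clear
         	 * for a valid unicast address.)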
   3730 	 */
   3731 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3732 		if (((myea[0] & 0xff) & 0x01) == 0)
   3733 			return offset; /* Found */
   3734 
   3735 	/* Not found */
   3736 	return 0;
   3737 }
   3738 
   3739 static int
   3740 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3741 {
   3742 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3743 	uint16_t offset = NVM_OFF_MACADDR;
   3744 	int do_invert = 0;
   3745 
   3746 	switch (sc->sc_type) {
   3747 	case WM_T_82580:
   3748 	case WM_T_I350:
   3749 	case WM_T_I354:
   3750 		/* EEPROM Top Level Partitioning */
   3751 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3752 		break;
   3753 	case WM_T_82571:
   3754 	case WM_T_82575:
   3755 	case WM_T_82576:
   3756 	case WM_T_80003:
   3757 	case WM_T_I210:
   3758 	case WM_T_I211:
   3759 		offset = wm_check_alt_mac_addr(sc);
   3760 		if (offset == 0)
   3761 			if ((sc->sc_funcid & 0x01) == 1)
   3762 				do_invert = 1;
   3763 		break;
   3764 	default:
   3765 		if ((sc->sc_funcid & 0x01) == 1)
   3766 			do_invert = 1;
   3767 		break;
   3768 	}
   3769 
   3770 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3771 		goto bad;
   3772 
   3773 	enaddr[0] = myea[0] & 0xff;
   3774 	enaddr[1] = myea[0] >> 8;
   3775 	enaddr[2] = myea[1] & 0xff;
   3776 	enaddr[3] = myea[1] >> 8;
   3777 	enaddr[4] = myea[2] & 0xff;
   3778 	enaddr[5] = myea[2] >> 8;
   3779 
   3780 	/*
   3781 	 * Toggle the LSB of the MAC address on the second port
   3782 	 * of some dual port cards.
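         	 * (An odd-numbered function gets the stored address with bit 0
         	 * of the last octet flipped.)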
   3783 	 */
   3784 	if (do_invert != 0)
   3785 		enaddr[5] ^= 1;
   3786 
   3787 	return 0;
   3788 
   3789  bad:
   3790 	return -1;
   3791 }
   3792 
   3793 /*
   3794  * wm_set_ral:
   3795  *
    3796  *	Set an entry in the receive address list.
   3797  */
   3798 static void
   3799 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3800 {
   3801 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3802 	uint32_t wlock_mac;
   3803 	int rv;
   3804 
   3805 	if (enaddr != NULL) {
   3806 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3807 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3808 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
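         		/* RAL_AV marks this receive address entry as valid. */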
   3809 		ral_hi |= RAL_AV;
   3810 	} else {
   3811 		ral_lo = 0;
   3812 		ral_hi = 0;
   3813 	}
   3814 
   3815 	switch (sc->sc_type) {
   3816 	case WM_T_82542_2_0:
   3817 	case WM_T_82542_2_1:
   3818 	case WM_T_82543:
   3819 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3820 		CSR_WRITE_FLUSH(sc);
   3821 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3822 		CSR_WRITE_FLUSH(sc);
   3823 		break;
   3824 	case WM_T_PCH2:
   3825 	case WM_T_PCH_LPT:
   3826 	case WM_T_PCH_SPT:
   3827 	case WM_T_PCH_CNP:
   3828 		if (idx == 0) {
   3829 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3830 			CSR_WRITE_FLUSH(sc);
   3831 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3832 			CSR_WRITE_FLUSH(sc);
   3833 			return;
   3834 		}
   3835 		if (sc->sc_type != WM_T_PCH2) {
   3836 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3837 			    FWSM_WLOCK_MAC);
   3838 			addrl = WMREG_SHRAL(idx - 1);
   3839 			addrh = WMREG_SHRAH(idx - 1);
   3840 		} else {
   3841 			wlock_mac = 0;
   3842 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3843 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3844 		}
   3845 
   3846 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3847 			rv = wm_get_swflag_ich8lan(sc);
   3848 			if (rv != 0)
   3849 				return;
   3850 			CSR_WRITE(sc, addrl, ral_lo);
   3851 			CSR_WRITE_FLUSH(sc);
   3852 			CSR_WRITE(sc, addrh, ral_hi);
   3853 			CSR_WRITE_FLUSH(sc);
   3854 			wm_put_swflag_ich8lan(sc);
   3855 		}
   3856 
   3857 		break;
   3858 	default:
   3859 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3860 		CSR_WRITE_FLUSH(sc);
   3861 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3862 		CSR_WRITE_FLUSH(sc);
   3863 		break;
   3864 	}
   3865 }
   3866 
   3867 /*
   3868  * wm_mchash:
   3869  *
   3870  *	Compute the hash of the multicast address for the 4096-bit
   3871  *	multicast filter.
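          *	The hash is 10 bits (1024-bit filter) on ICH/PCH variants.
          *	sc_mchash_type selects which bits of the last two octets
          *	are used; e.g. with type 0, 01:00:5e:00:00:01 hashes to
          *	0x010 (MTA word 0, bit 16).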
   3872  */
   3873 static uint32_t
   3874 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3875 {
   3876 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3877 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3878 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3879 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3880 	uint32_t hash;
   3881 
   3882 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3883 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3884 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3885 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3886 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3887 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3888 		return (hash & 0x3ff);
   3889 	}
   3890 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3891 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3892 
   3893 	return (hash & 0xfff);
   3894 }
   3895 
    3896 /*
    3897  * wm_rar_count:
    3898  *	Return the number of receive address (RAR) slots on this chip.
    3899  */
   3900 static int
   3901 wm_rar_count(struct wm_softc *sc)
   3902 {
   3903 	int size;
   3904 
   3905 	switch (sc->sc_type) {
   3906 	case WM_T_ICH8:
    3907 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3908 		break;
   3909 	case WM_T_ICH9:
   3910 	case WM_T_ICH10:
   3911 	case WM_T_PCH:
   3912 		size = WM_RAL_TABSIZE_ICH8;
   3913 		break;
   3914 	case WM_T_PCH2:
   3915 		size = WM_RAL_TABSIZE_PCH2;
   3916 		break;
   3917 	case WM_T_PCH_LPT:
   3918 	case WM_T_PCH_SPT:
   3919 	case WM_T_PCH_CNP:
   3920 		size = WM_RAL_TABSIZE_PCH_LPT;
   3921 		break;
   3922 	case WM_T_82575:
   3923 	case WM_T_I210:
   3924 	case WM_T_I211:
   3925 		size = WM_RAL_TABSIZE_82575;
   3926 		break;
   3927 	case WM_T_82576:
   3928 	case WM_T_82580:
   3929 		size = WM_RAL_TABSIZE_82576;
   3930 		break;
   3931 	case WM_T_I350:
   3932 	case WM_T_I354:
   3933 		size = WM_RAL_TABSIZE_I350;
   3934 		break;
   3935 	default:
   3936 		size = WM_RAL_TABSIZE;
   3937 	}
   3938 
   3939 	return size;
   3940 }
   3941 
   3942 /*
   3943  * wm_set_filter:
   3944  *
   3945  *	Set up the receive filter.
   3946  */
   3947 static void
   3948 wm_set_filter(struct wm_softc *sc)
   3949 {
   3950 	struct ethercom *ec = &sc->sc_ethercom;
   3951 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3952 	struct ether_multi *enm;
   3953 	struct ether_multistep step;
   3954 	bus_addr_t mta_reg;
   3955 	uint32_t hash, reg, bit;
   3956 	int i, size, ralmax, rv;
   3957 
   3958 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   3959 		device_xname(sc->sc_dev), __func__));
   3960 
   3961 	if (sc->sc_type >= WM_T_82544)
   3962 		mta_reg = WMREG_CORDOVA_MTA;
   3963 	else
   3964 		mta_reg = WMREG_MTA;
   3965 
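         	/* Clear broadcast accept and unicast/multicast promiscuous bits. */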
   3966 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3967 
   3968 	if (ifp->if_flags & IFF_BROADCAST)
   3969 		sc->sc_rctl |= RCTL_BAM;
   3970 	if (ifp->if_flags & IFF_PROMISC) {
   3971 		sc->sc_rctl |= RCTL_UPE;
   3972 		ETHER_LOCK(ec);
   3973 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3974 		ETHER_UNLOCK(ec);
   3975 		goto allmulti;
   3976 	}
   3977 
   3978 	/*
   3979 	 * Set the station address in the first RAL slot, and
   3980 	 * clear the remaining slots.
   3981 	 */
   3982 	size = wm_rar_count(sc);
   3983 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3984 
   3985 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3986 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3987 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3988 		switch (i) {
   3989 		case 0:
   3990 			/* We can use all entries */
   3991 			ralmax = size;
   3992 			break;
   3993 		case 1:
   3994 			/* Only RAR[0] */
   3995 			ralmax = 1;
   3996 			break;
   3997 		default:
   3998 			/* Available SHRA + RAR[0] */
   3999 			ralmax = i + 1;
   4000 		}
   4001 	} else
   4002 		ralmax = size;
   4003 	for (i = 1; i < size; i++) {
   4004 		if (i < ralmax)
   4005 			wm_set_ral(sc, NULL, i);
   4006 	}
   4007 
   4008 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4009 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4010 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4011 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   4012 		size = WM_ICH8_MC_TABSIZE;
   4013 	else
   4014 		size = WM_MC_TABSIZE;
   4015 	/* Clear out the multicast table. */
   4016 	for (i = 0; i < size; i++) {
   4017 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   4018 		CSR_WRITE_FLUSH(sc);
   4019 	}
   4020 
   4021 	ETHER_LOCK(ec);
   4022 	ETHER_FIRST_MULTI(step, ec, enm);
   4023 	while (enm != NULL) {
   4024 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   4025 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4026 			ETHER_UNLOCK(ec);
   4027 			/*
   4028 			 * We must listen to a range of multicast addresses.
   4029 			 * For now, just accept all multicasts, rather than
   4030 			 * trying to set only those filter bits needed to match
   4031 			 * the range.  (At this time, the only use of address
   4032 			 * ranges is for IP multicast routing, for which the
   4033 			 * range is big enough to require all bits set.)
   4034 			 */
   4035 			goto allmulti;
   4036 		}
   4037 
   4038 		hash = wm_mchash(sc, enm->enm_addrlo);
   4039 
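         		/*
         		 * The multicast table is an array of 32-bit words: the
         		 * upper hash bits select the word, the low 5 bits the bit.
         		 */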
   4040 		reg = (hash >> 5);
   4041 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4042 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4043 		    || (sc->sc_type == WM_T_PCH2)
   4044 		    || (sc->sc_type == WM_T_PCH_LPT)
   4045 		    || (sc->sc_type == WM_T_PCH_SPT)
   4046 		    || (sc->sc_type == WM_T_PCH_CNP))
   4047 			reg &= 0x1f;
   4048 		else
   4049 			reg &= 0x7f;
   4050 		bit = hash & 0x1f;
   4051 
   4052 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   4053 		hash |= 1U << bit;
   4054 
   4055 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   4056 			/*
    4057 			 * 82544 Errata 9: Certain registers cannot be written
   4058 			 * with particular alignments in PCI-X bus operation
   4059 			 * (FCAH, MTA and VFTA).
   4060 			 */
   4061 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   4062 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4063 			CSR_WRITE_FLUSH(sc);
   4064 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   4065 			CSR_WRITE_FLUSH(sc);
   4066 		} else {
   4067 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   4068 			CSR_WRITE_FLUSH(sc);
   4069 		}
   4070 
   4071 		ETHER_NEXT_MULTI(step, enm);
   4072 	}
   4073 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4074 	ETHER_UNLOCK(ec);
   4075 
   4076 	goto setit;
   4077 
   4078  allmulti:
   4079 	sc->sc_rctl |= RCTL_MPE;
   4080 
   4081  setit:
   4082 	if (sc->sc_type >= WM_T_PCH2) {
   4083 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4084 		    && (ifp->if_mtu > ETHERMTU))
   4085 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
   4086 		else
   4087 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
   4088 		if (rv != 0)
   4089 			device_printf(sc->sc_dev,
   4090 			    "Failed to do workaround for jumbo frame.\n");
   4091 	}
   4092 
   4093 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   4094 }
   4095 
   4096 /* Reset and init related */
   4097 
   4098 static void
   4099 wm_set_vlan(struct wm_softc *sc)
   4100 {
   4101 
   4102 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4103 		device_xname(sc->sc_dev), __func__));
   4104 
   4105 	/* Deal with VLAN enables. */
   4106 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   4107 		sc->sc_ctrl |= CTRL_VME;
   4108 	else
   4109 		sc->sc_ctrl &= ~CTRL_VME;
   4110 
   4111 	/* Write the control registers. */
   4112 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4113 }
   4114 
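         /*
          * wm_set_pcie_completion_timeout:
          *
          *	If the PCIe completion timeout is still at its default of 0,
          *	raise it: to 10ms via GCR, or to 16ms via the PCIe Device
          *	Control 2 register when the version 2 capability is present.
          *	Completion timeout resend is disabled in either case.
          */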
   4115 static void
   4116 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   4117 {
   4118 	uint32_t gcr;
   4119 	pcireg_t ctrl2;
   4120 
   4121 	gcr = CSR_READ(sc, WMREG_GCR);
   4122 
   4123 	/* Only take action if timeout value is defaulted to 0 */
   4124 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   4125 		goto out;
   4126 
   4127 	if ((gcr & GCR_CAP_VER2) == 0) {
   4128 		gcr |= GCR_CMPL_TMOUT_10MS;
   4129 		goto out;
   4130 	}
   4131 
   4132 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4133 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   4134 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   4135 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4136 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   4137 
   4138 out:
   4139 	/* Disable completion timeout resend */
   4140 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   4141 
   4142 	CSR_WRITE(sc, WMREG_GCR, gcr);
   4143 }
   4144 
   4145 void
   4146 wm_get_auto_rd_done(struct wm_softc *sc)
   4147 {
   4148 	int i;
   4149 
    4150 	/* Wait for the EEPROM to reload */
   4151 	switch (sc->sc_type) {
   4152 	case WM_T_82571:
   4153 	case WM_T_82572:
   4154 	case WM_T_82573:
   4155 	case WM_T_82574:
   4156 	case WM_T_82583:
   4157 	case WM_T_82575:
   4158 	case WM_T_82576:
   4159 	case WM_T_82580:
   4160 	case WM_T_I350:
   4161 	case WM_T_I354:
   4162 	case WM_T_I210:
   4163 	case WM_T_I211:
   4164 	case WM_T_80003:
   4165 	case WM_T_ICH8:
   4166 	case WM_T_ICH9:
   4167 		for (i = 0; i < 10; i++) {
   4168 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   4169 				break;
   4170 			delay(1000);
   4171 		}
   4172 		if (i == 10) {
   4173 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   4174 			    "complete\n", device_xname(sc->sc_dev));
   4175 		}
   4176 		break;
   4177 	default:
   4178 		break;
   4179 	}
   4180 }
   4181 
   4182 void
   4183 wm_lan_init_done(struct wm_softc *sc)
   4184 {
   4185 	uint32_t reg = 0;
   4186 	int i;
   4187 
   4188 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4189 		device_xname(sc->sc_dev), __func__));
   4190 
   4191 	/* Wait for eeprom to reload */
   4192 	switch (sc->sc_type) {
   4193 	case WM_T_ICH10:
   4194 	case WM_T_PCH:
   4195 	case WM_T_PCH2:
   4196 	case WM_T_PCH_LPT:
   4197 	case WM_T_PCH_SPT:
   4198 	case WM_T_PCH_CNP:
   4199 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4200 			reg = CSR_READ(sc, WMREG_STATUS);
   4201 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4202 				break;
   4203 			delay(100);
   4204 		}
   4205 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4206 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4207 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4208 		}
   4209 		break;
   4210 	default:
   4211 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4212 		    __func__);
   4213 		break;
   4214 	}
   4215 
   4216 	reg &= ~STATUS_LAN_INIT_DONE;
   4217 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4218 }
   4219 
   4220 void
   4221 wm_get_cfg_done(struct wm_softc *sc)
   4222 {
   4223 	int mask;
   4224 	uint32_t reg;
   4225 	int i;
   4226 
   4227 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4228 		device_xname(sc->sc_dev), __func__));
   4229 
   4230 	/* Wait for eeprom to reload */
   4231 	switch (sc->sc_type) {
   4232 	case WM_T_82542_2_0:
   4233 	case WM_T_82542_2_1:
   4234 		/* null */
   4235 		break;
   4236 	case WM_T_82543:
   4237 	case WM_T_82544:
   4238 	case WM_T_82540:
   4239 	case WM_T_82545:
   4240 	case WM_T_82545_3:
   4241 	case WM_T_82546:
   4242 	case WM_T_82546_3:
   4243 	case WM_T_82541:
   4244 	case WM_T_82541_2:
   4245 	case WM_T_82547:
   4246 	case WM_T_82547_2:
   4247 	case WM_T_82573:
   4248 	case WM_T_82574:
   4249 	case WM_T_82583:
   4250 		/* generic */
   4251 		delay(10*1000);
   4252 		break;
   4253 	case WM_T_80003:
   4254 	case WM_T_82571:
   4255 	case WM_T_82572:
   4256 	case WM_T_82575:
   4257 	case WM_T_82576:
   4258 	case WM_T_82580:
   4259 	case WM_T_I350:
   4260 	case WM_T_I354:
   4261 	case WM_T_I210:
   4262 	case WM_T_I211:
   4263 		if (sc->sc_type == WM_T_82571) {
   4264 			/* Only 82571 shares port 0 */
   4265 			mask = EEMNGCTL_CFGDONE_0;
   4266 		} else
   4267 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4268 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4269 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4270 				break;
   4271 			delay(1000);
   4272 		}
   4273 		if (i >= WM_PHY_CFG_TIMEOUT)
   4274 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
   4275 				device_xname(sc->sc_dev), __func__));
   4276 		break;
   4277 	case WM_T_ICH8:
   4278 	case WM_T_ICH9:
   4279 	case WM_T_ICH10:
   4280 	case WM_T_PCH:
   4281 	case WM_T_PCH2:
   4282 	case WM_T_PCH_LPT:
   4283 	case WM_T_PCH_SPT:
   4284 	case WM_T_PCH_CNP:
   4285 		delay(10*1000);
   4286 		if (sc->sc_type >= WM_T_ICH10)
   4287 			wm_lan_init_done(sc);
   4288 		else
   4289 			wm_get_auto_rd_done(sc);
   4290 
   4291 		/* Clear PHY Reset Asserted bit */
   4292 		reg = CSR_READ(sc, WMREG_STATUS);
   4293 		if ((reg & STATUS_PHYRA) != 0)
   4294 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4295 		break;
   4296 	default:
   4297 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4298 		    __func__);
   4299 		break;
   4300 	}
   4301 }
   4302 
   4303 int
   4304 wm_phy_post_reset(struct wm_softc *sc)
   4305 {
   4306 	device_t dev = sc->sc_dev;
   4307 	uint16_t reg;
   4308 	int rv = 0;
   4309 
   4310 	/* This function is only for ICH8 and newer. */
   4311 	if (sc->sc_type < WM_T_ICH8)
   4312 		return 0;
   4313 
   4314 	if (wm_phy_resetisblocked(sc)) {
   4315 		/* XXX */
   4316 		device_printf(dev, "PHY is blocked\n");
   4317 		return -1;
   4318 	}
   4319 
   4320 	/* Allow time for h/w to get to quiescent state after reset */
   4321 	delay(10*1000);
   4322 
   4323 	/* Perform any necessary post-reset workarounds */
   4324 	if (sc->sc_type == WM_T_PCH)
   4325 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4326 	else if (sc->sc_type == WM_T_PCH2)
   4327 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4328 	if (rv != 0)
   4329 		return rv;
   4330 
   4331 	/* Clear the host wakeup bit after lcd reset */
   4332 	if (sc->sc_type >= WM_T_PCH) {
   4333 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4334 		reg &= ~BM_WUC_HOST_WU_BIT;
   4335 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4336 	}
   4337 
   4338 	/* Configure the LCD with the extended configuration region in NVM */
   4339 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4340 		return rv;
   4341 
   4342 	/* Configure the LCD with the OEM bits in NVM */
   4343 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4344 
   4345 	if (sc->sc_type == WM_T_PCH2) {
   4346 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4347 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4348 			delay(10 * 1000);
   4349 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4350 		}
   4351 		/* Set EEE LPI Update Timer to 200usec */
   4352 		rv = sc->phy.acquire(sc);
   4353 		if (rv)
   4354 			return rv;
   4355 		rv = wm_write_emi_reg_locked(dev,
   4356 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4357 		sc->phy.release(sc);
   4358 	}
   4359 
   4360 	return rv;
   4361 }
   4362 
   4363 /* Only for PCH and newer */
   4364 static int
   4365 wm_write_smbus_addr(struct wm_softc *sc)
   4366 {
   4367 	uint32_t strap, freq;
   4368 	uint16_t phy_data;
   4369 	int rv;
   4370 
   4371 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4372 		device_xname(sc->sc_dev), __func__));
   4373 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4374 
   4375 	strap = CSR_READ(sc, WMREG_STRAP);
   4376 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4377 
   4378 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4379 	if (rv != 0)
   4380 		return -1;
   4381 
   4382 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4383 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4384 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4385 
   4386 	if (sc->sc_phytype == WMPHY_I217) {
   4387 		/* Restore SMBus frequency */
    4388 		if (freq--) {
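         			/*
         			 * freq is now the strap value minus one; bits 0
         			 * and 1 map to the PHY's low/high frequency bits.
         			 */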
   4389 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4390 			    | HV_SMB_ADDR_FREQ_HIGH);
   4391 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4392 			    HV_SMB_ADDR_FREQ_LOW);
   4393 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4394 			    HV_SMB_ADDR_FREQ_HIGH);
   4395 		} else
   4396 			DPRINTF(sc, WM_DEBUG_INIT,
   4397 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4398 				device_xname(sc->sc_dev), __func__));
   4399 	}
   4400 
   4401 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4402 	    phy_data);
   4403 }
   4404 
   4405 static int
   4406 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4407 {
   4408 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4409 	uint16_t phy_page = 0;
   4410 	int rv = 0;
   4411 
   4412 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4413 		device_xname(sc->sc_dev), __func__));
   4414 
   4415 	switch (sc->sc_type) {
   4416 	case WM_T_ICH8:
   4417 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4418 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4419 			return 0;
   4420 
   4421 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4422 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4423 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4424 			break;
   4425 		}
   4426 		/* FALLTHROUGH */
   4427 	case WM_T_PCH:
   4428 	case WM_T_PCH2:
   4429 	case WM_T_PCH_LPT:
   4430 	case WM_T_PCH_SPT:
   4431 	case WM_T_PCH_CNP:
   4432 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4433 		break;
   4434 	default:
   4435 		return 0;
   4436 	}
   4437 
   4438 	if ((rv = sc->phy.acquire(sc)) != 0)
   4439 		return rv;
   4440 
   4441 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4442 	if ((reg & sw_cfg_mask) == 0)
   4443 		goto release;
   4444 
   4445 	/*
   4446 	 * Make sure HW does not configure LCD from PHY extended configuration
   4447 	 * before SW configuration
   4448 	 */
   4449 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4450 	if ((sc->sc_type < WM_T_PCH2)
   4451 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4452 		goto release;
   4453 
   4454 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4455 		device_xname(sc->sc_dev), __func__));
   4456 	/* word_addr is in DWORD */
   4457 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4458 
   4459 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4460 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4461 	if (cnf_size == 0)
   4462 		goto release;
   4463 
   4464 	if (((sc->sc_type == WM_T_PCH)
   4465 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4466 	    || (sc->sc_type > WM_T_PCH)) {
   4467 		/*
   4468 		 * HW configures the SMBus address and LEDs when the OEM and
   4469 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4470 		 * are cleared, SW will configure them instead.
   4471 		 */
   4472 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4473 			device_xname(sc->sc_dev), __func__));
   4474 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4475 			goto release;
   4476 
   4477 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4478 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4479 		    (uint16_t)reg);
   4480 		if (rv != 0)
   4481 			goto release;
   4482 	}
   4483 
   4484 	/* Configure LCD from extended configuration region. */
   4485 	for (i = 0; i < cnf_size; i++) {
   4486 		uint16_t reg_data, reg_addr;
   4487 
   4488 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4489 			goto release;
   4490 
    4491 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4492 			goto release;
   4493 
   4494 		if (reg_addr == IGPHY_PAGE_SELECT)
   4495 			phy_page = reg_data;
   4496 
   4497 		reg_addr &= IGPHY_MAXREGADDR;
   4498 		reg_addr |= phy_page;
   4499 
   4500 		KASSERT(sc->phy.writereg_locked != NULL);
   4501 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4502 		    reg_data);
   4503 	}
   4504 
   4505 release:
   4506 	sc->phy.release(sc);
   4507 	return rv;
   4508 }
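
/*
 * Sketch of the extended configuration region layout consumed by the
 * loop in wm_init_lcd_from_nvm() above, as inferred from the NVM reads
 * (data word first, address word second):
 *
 *	NVM word word_addr + 2i     : reg_data  (value to write)
 *	NVM word word_addr + 2i + 1 : reg_addr  (target PHY register)
 *
 * A record whose reg_addr is IGPHY_PAGE_SELECT updates phy_page; the
 * following records are then addressed within that page via
 * (reg_addr & IGPHY_MAXREGADDR) | phy_page.
 */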
   4509 
   4510 /*
   4511  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4512  *  @sc:       pointer to the HW structure
   4513  *  @d0_state: boolean if entering d0 or d3 device state
   4514  *
   4515  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4516  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4517  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4518  */
   4519 int
   4520 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4521 {
   4522 	uint32_t mac_reg;
   4523 	uint16_t oem_reg;
   4524 	int rv;
   4525 
   4526 	if (sc->sc_type < WM_T_PCH)
   4527 		return 0;
   4528 
   4529 	rv = sc->phy.acquire(sc);
   4530 	if (rv != 0)
   4531 		return rv;
   4532 
   4533 	if (sc->sc_type == WM_T_PCH) {
   4534 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4535 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4536 			goto release;
   4537 	}
   4538 
   4539 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4540 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4541 		goto release;
   4542 
   4543 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4544 
   4545 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4546 	if (rv != 0)
   4547 		goto release;
   4548 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4549 
   4550 	if (d0_state) {
   4551 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4552 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4553 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4554 			oem_reg |= HV_OEM_BITS_LPLU;
   4555 	} else {
   4556 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4557 		    != 0)
   4558 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4559 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4560 		    != 0)
   4561 			oem_reg |= HV_OEM_BITS_LPLU;
   4562 	}
   4563 
   4564 	/* Set Restart auto-neg to activate the bits */
   4565 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4566 	    && (wm_phy_resetisblocked(sc) == false))
   4567 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4568 
   4569 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4570 
   4571 release:
   4572 	sc->phy.release(sc);
   4573 
   4574 	return rv;
   4575 }
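
/*
 * Summary of the PHY_CTRL -> HV_OEM_BITS mapping implemented above:
 *
 *	d0_state  PHY_CTRL bit(s) tested			OEM bit set
 *	true	  PHY_CTRL_GBE_DIS				A1KDIS
 *	true	  PHY_CTRL_D0A_LPLU				LPLU
 *	false	  PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS	A1KDIS
 *	false	  PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU	LPLU
 *
 * ANEGNOW is additionally set (when PHY reset is not blocked) so that
 * the new bits take effect through an autonegotiation restart.
 */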
   4576 
   4577 /* Init hardware bits */
   4578 void
   4579 wm_initialize_hardware_bits(struct wm_softc *sc)
   4580 {
   4581 	uint32_t tarc0, tarc1, reg;
   4582 
   4583 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4584 		device_xname(sc->sc_dev), __func__));
   4585 
   4586 	/* For 82571 variant, 80003 and ICHs */
   4587 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4588 	    || (sc->sc_type >= WM_T_80003)) {
   4589 
   4590 		/* Transmit Descriptor Control 0 */
   4591 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4592 		reg |= TXDCTL_COUNT_DESC;
   4593 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4594 
   4595 		/* Transmit Descriptor Control 1 */
   4596 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4597 		reg |= TXDCTL_COUNT_DESC;
   4598 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4599 
   4600 		/* TARC0 */
   4601 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4602 		switch (sc->sc_type) {
   4603 		case WM_T_82571:
   4604 		case WM_T_82572:
   4605 		case WM_T_82573:
   4606 		case WM_T_82574:
   4607 		case WM_T_82583:
   4608 		case WM_T_80003:
   4609 			/* Clear bits 30..27 */
   4610 			tarc0 &= ~__BITS(30, 27);
   4611 			break;
   4612 		default:
   4613 			break;
   4614 		}
   4615 
   4616 		switch (sc->sc_type) {
   4617 		case WM_T_82571:
   4618 		case WM_T_82572:
   4619 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4620 
   4621 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4622 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4623 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4624 			/* 8257[12] Errata No.7 */
    4625 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4626 
   4627 			/* TARC1 bit 28 */
   4628 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4629 				tarc1 &= ~__BIT(28);
   4630 			else
   4631 				tarc1 |= __BIT(28);
   4632 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4633 
   4634 			/*
   4635 			 * 8257[12] Errata No.13
    4636 			 * Disable Dynamic Clock Gating.
   4637 			 */
   4638 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4639 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4640 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4641 			break;
   4642 		case WM_T_82573:
   4643 		case WM_T_82574:
   4644 		case WM_T_82583:
   4645 			if ((sc->sc_type == WM_T_82574)
   4646 			    || (sc->sc_type == WM_T_82583))
   4647 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4648 
   4649 			/* Extended Device Control */
   4650 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4651 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4652 			reg |= __BIT(22);	/* Set bit 22 */
   4653 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4654 
   4655 			/* Device Control */
   4656 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4657 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4658 
   4659 			/* PCIe Control Register */
   4660 			/*
   4661 			 * 82573 Errata (unknown).
   4662 			 *
   4663 			 * 82574 Errata 25 and 82583 Errata 12
   4664 			 * "Dropped Rx Packets":
    4665 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4666 			 */
   4667 			reg = CSR_READ(sc, WMREG_GCR);
   4668 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4669 			CSR_WRITE(sc, WMREG_GCR, reg);
   4670 
   4671 			if ((sc->sc_type == WM_T_82574)
   4672 			    || (sc->sc_type == WM_T_82583)) {
   4673 				/*
   4674 				 * Document says this bit must be set for
   4675 				 * proper operation.
   4676 				 */
   4677 				reg = CSR_READ(sc, WMREG_GCR);
   4678 				reg |= __BIT(22);
   4679 				CSR_WRITE(sc, WMREG_GCR, reg);
   4680 
   4681 				/*
    4682 				 * Apply a workaround for the hardware
    4683 				 * errata documented in the errata docs.
    4684 				 * It fixes an issue where error-prone or
    4685 				 * unreliable PCIe completions occur,
    4686 				 * particularly with ASPM enabled. Without
    4687 				 * the fix, the issue can cause Tx timeouts.
   4688 				 */
   4689 				reg = CSR_READ(sc, WMREG_GCR2);
   4690 				reg |= __BIT(0);
   4691 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4692 			}
   4693 			break;
   4694 		case WM_T_80003:
   4695 			/* TARC0 */
   4696 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4697 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4698 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4699 
   4700 			/* TARC1 bit 28 */
   4701 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4702 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4703 				tarc1 &= ~__BIT(28);
   4704 			else
   4705 				tarc1 |= __BIT(28);
   4706 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4707 			break;
   4708 		case WM_T_ICH8:
   4709 		case WM_T_ICH9:
   4710 		case WM_T_ICH10:
   4711 		case WM_T_PCH:
   4712 		case WM_T_PCH2:
   4713 		case WM_T_PCH_LPT:
   4714 		case WM_T_PCH_SPT:
   4715 		case WM_T_PCH_CNP:
   4716 			/* TARC0 */
   4717 			if (sc->sc_type == WM_T_ICH8) {
   4718 				/* Set TARC0 bits 29 and 28 */
   4719 				tarc0 |= __BITS(29, 28);
   4720 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4721 				tarc0 |= __BIT(29);
   4722 				/*
    4723 				 * Drop bit 28. From Linux.
   4724 				 * See I218/I219 spec update
   4725 				 * "5. Buffer Overrun While the I219 is
   4726 				 * Processing DMA Transactions"
   4727 				 */
   4728 				tarc0 &= ~__BIT(28);
   4729 			}
   4730 			/* Set TARC0 bits 23,24,26,27 */
   4731 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4732 
   4733 			/* CTRL_EXT */
   4734 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4735 			reg |= __BIT(22);	/* Set bit 22 */
   4736 			/*
   4737 			 * Enable PHY low-power state when MAC is at D3
   4738 			 * w/o WoL
   4739 			 */
   4740 			if (sc->sc_type >= WM_T_PCH)
   4741 				reg |= CTRL_EXT_PHYPDEN;
   4742 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4743 
   4744 			/* TARC1 */
   4745 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4746 			/* bit 28 */
   4747 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4748 				tarc1 &= ~__BIT(28);
   4749 			else
   4750 				tarc1 |= __BIT(28);
   4751 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4752 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4753 
   4754 			/* Device Status */
   4755 			if (sc->sc_type == WM_T_ICH8) {
   4756 				reg = CSR_READ(sc, WMREG_STATUS);
   4757 				reg &= ~__BIT(31);
   4758 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4759 
   4760 			}
   4761 
   4762 			/* IOSFPC */
   4763 			if (sc->sc_type == WM_T_PCH_SPT) {
   4764 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4765 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4766 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4767 			}
   4768 			/*
   4769 			 * Work-around descriptor data corruption issue during
   4770 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4771 			 * capability.
   4772 			 */
   4773 			reg = CSR_READ(sc, WMREG_RFCTL);
   4774 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4775 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4776 			break;
   4777 		default:
   4778 			break;
   4779 		}
   4780 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4781 
   4782 		switch (sc->sc_type) {
   4783 		/*
   4784 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4785 		 * Avoid RSS Hash Value bug.
   4786 		 */
   4787 		case WM_T_82571:
   4788 		case WM_T_82572:
   4789 		case WM_T_82573:
   4790 		case WM_T_80003:
   4791 		case WM_T_ICH8:
   4792 			reg = CSR_READ(sc, WMREG_RFCTL);
    4793 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4794 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4795 			break;
   4796 		case WM_T_82574:
    4797 			/* Use extended Rx descriptors. */
   4798 			reg = CSR_READ(sc, WMREG_RFCTL);
   4799 			reg |= WMREG_RFCTL_EXSTEN;
   4800 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4801 			break;
   4802 		default:
   4803 			break;
   4804 		}
   4805 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4806 		/*
   4807 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4808 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4809 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4810 		 * Correctly by the Device"
   4811 		 *
   4812 		 * I354(C2000) Errata AVR53:
   4813 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4814 		 * Hang"
   4815 		 */
   4816 		reg = CSR_READ(sc, WMREG_RFCTL);
   4817 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4818 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4819 	}
   4820 }
   4821 
   4822 static uint32_t
   4823 wm_rxpbs_adjust_82580(uint32_t val)
   4824 {
   4825 	uint32_t rv = 0;
   4826 
   4827 	if (val < __arraycount(wm_82580_rxpbs_table))
   4828 		rv = wm_82580_rxpbs_table[val];
   4829 
   4830 	return rv;
   4831 }
   4832 
   4833 /*
   4834  * wm_reset_phy:
   4835  *
   4836  *	generic PHY reset function.
   4837  *	Same as e1000_phy_hw_reset_generic()
   4838  */
   4839 static int
   4840 wm_reset_phy(struct wm_softc *sc)
   4841 {
   4842 	uint32_t reg;
   4843 
   4844 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4845 		device_xname(sc->sc_dev), __func__));
   4846 	if (wm_phy_resetisblocked(sc))
   4847 		return -1;
   4848 
   4849 	sc->phy.acquire(sc);
   4850 
   4851 	reg = CSR_READ(sc, WMREG_CTRL);
   4852 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4853 	CSR_WRITE_FLUSH(sc);
   4854 
   4855 	delay(sc->phy.reset_delay_us);
   4856 
   4857 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4858 	CSR_WRITE_FLUSH(sc);
   4859 
   4860 	delay(150);
   4861 
   4862 	sc->phy.release(sc);
   4863 
   4864 	wm_get_cfg_done(sc);
   4865 	wm_phy_post_reset(sc);
   4866 
   4867 	return 0;
   4868 }
   4869 
   4870 /*
   4871  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
   4872  *
   4873  * In i219, the descriptor rings must be emptied before resetting the HW
   4874  * or before changing the device state to D3 during runtime (runtime PM).
   4875  *
   4876  * Failure to do this will cause the HW to enter a unit hang state which can
    4877  * only be released by a PCI reset of the device.
   4878  *
   4879  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
   4880  */
   4881 static void
   4882 wm_flush_desc_rings(struct wm_softc *sc)
   4883 {
   4884 	pcireg_t preg;
   4885 	uint32_t reg;
   4886 	struct wm_txqueue *txq;
   4887 	wiseman_txdesc_t *txd;
   4888 	int nexttx;
   4889 	uint32_t rctl;
   4890 
   4891 	/* First, disable MULR fix in FEXTNVM11 */
   4892 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4893 	reg |= FEXTNVM11_DIS_MULRFIX;
   4894 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4895 
   4896 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4897 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4898 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4899 		return;
   4900 
   4901 	/*
   4902 	 * Remove all descriptors from the tx_ring.
   4903 	 *
   4904 	 * We want to clear all pending descriptors from the TX ring. Zeroing
   4905 	 * happens when the HW reads the regs. We assign the ring itself as
    4906 	 * the data of the next descriptor. We don't care about the data;
    4907 	 * we are about to reset the HW anyway.
   4908 	 */
   4909 #ifdef WM_DEBUG
   4910 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
   4911 #endif
   4912 	reg = CSR_READ(sc, WMREG_TCTL);
   4913 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4914 
   4915 	txq = &sc->sc_queue[0].wmq_txq;
   4916 	nexttx = txq->txq_next;
   4917 	txd = &txq->txq_descs[nexttx];
   4918 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
   4919 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4920 	txd->wtx_fields.wtxu_status = 0;
   4921 	txd->wtx_fields.wtxu_options = 0;
   4922 	txd->wtx_fields.wtxu_vlan = 0;
   4923 
   4924 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4925 	    BUS_SPACE_BARRIER_WRITE);
   4926 
   4927 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4928 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4929 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4930 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4931 	delay(250);
   4932 
   4933 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4934 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4935 		return;
   4936 
   4937 	/*
   4938 	 * Mark all descriptors in the RX ring as consumed and disable the
   4939 	 * rx ring.
   4940 	 */
   4941 #ifdef WM_DEBUG
   4942 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4943 #endif
   4944 	rctl = CSR_READ(sc, WMREG_RCTL);
   4945 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4946 	CSR_WRITE_FLUSH(sc);
   4947 	delay(150);
   4948 
   4949 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4950 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4951 	reg &= 0xffffc000;
   4952 	/*
   4953 	 * Update thresholds: prefetch threshold to 31, host threshold
   4954 	 * to 1 and make sure the granularity is "descriptors" and not
   4955 	 * "cache lines"
   4956 	 */
   4957 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4958 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4959 
   4960 	/* Momentarily enable the RX ring for the changes to take effect */
   4961 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4962 	CSR_WRITE_FLUSH(sc);
   4963 	delay(150);
   4964 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4965 }
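
/*
 * For reference, the RXDCTL value written in wm_flush_desc_rings() above
 * decodes as follows (assuming the usual e1000 RXDCTL layout, which is
 * what the in-line comments describe): prefetch threshold 0x1f (31) in
 * the low bits, host threshold 1 at bit 8, and RXDCTL_GRAN selecting
 * "descriptors" rather than "cache lines" as the granularity:
 *
 *	reg = (reg & 0xffffc000) | 0x1f | (1 << 8) | RXDCTL_GRAN;
 */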
   4966 
   4967 /*
   4968  * wm_reset:
   4969  *
   4970  *	Reset the i82542 chip.
   4971  */
   4972 static void
   4973 wm_reset(struct wm_softc *sc)
   4974 {
   4975 	int phy_reset = 0;
   4976 	int i, error = 0;
   4977 	uint32_t reg;
   4978 	uint16_t kmreg;
   4979 	int rv;
   4980 
   4981 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   4982 		device_xname(sc->sc_dev), __func__));
   4983 	KASSERT(sc->sc_type != 0);
   4984 
   4985 	/*
   4986 	 * Allocate on-chip memory according to the MTU size.
   4987 	 * The Packet Buffer Allocation register must be written
   4988 	 * before the chip is reset.
   4989 	 */
   4990 	switch (sc->sc_type) {
   4991 	case WM_T_82547:
   4992 	case WM_T_82547_2:
   4993 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4994 		    PBA_22K : PBA_30K;
   4995 		for (i = 0; i < sc->sc_nqueues; i++) {
   4996 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4997 			txq->txq_fifo_head = 0;
   4998 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4999 			txq->txq_fifo_size =
   5000 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   5001 			txq->txq_fifo_stall = 0;
   5002 		}
   5003 		break;
   5004 	case WM_T_82571:
   5005 	case WM_T_82572:
   5006 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   5007 	case WM_T_80003:
   5008 		sc->sc_pba = PBA_32K;
   5009 		break;
   5010 	case WM_T_82573:
   5011 		sc->sc_pba = PBA_12K;
   5012 		break;
   5013 	case WM_T_82574:
   5014 	case WM_T_82583:
   5015 		sc->sc_pba = PBA_20K;
   5016 		break;
   5017 	case WM_T_82576:
   5018 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   5019 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   5020 		break;
   5021 	case WM_T_82580:
   5022 	case WM_T_I350:
   5023 	case WM_T_I354:
   5024 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   5025 		break;
   5026 	case WM_T_I210:
   5027 	case WM_T_I211:
   5028 		sc->sc_pba = PBA_34K;
   5029 		break;
   5030 	case WM_T_ICH8:
   5031 		/* Workaround for a bit corruption issue in FIFO memory */
   5032 		sc->sc_pba = PBA_8K;
   5033 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   5034 		break;
   5035 	case WM_T_ICH9:
   5036 	case WM_T_ICH10:
   5037 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   5038 		    PBA_14K : PBA_10K;
   5039 		break;
   5040 	case WM_T_PCH:
   5041 	case WM_T_PCH2:	/* XXX 14K? */
   5042 	case WM_T_PCH_LPT:
   5043 	case WM_T_PCH_SPT:
   5044 	case WM_T_PCH_CNP:
   5045 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
   5046 		    PBA_12K : PBA_26K;
   5047 		break;
   5048 	default:
   5049 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   5050 		    PBA_40K : PBA_48K;
   5051 		break;
   5052 	}
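	/*
	 * Worked example (illustrative, assuming the PBA_* constants encode
	 * kilobyte counts): on an 82547 with the default MTU, sc_pba is
	 * PBA_30K, so each TX FIFO above starts at the 30KB mark of the
	 * packet buffer and is sized to PBA_40K - PBA_30K = 10KB, the space
	 * left over after the 30KB RX allocation.
	 */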
   5053 	/*
   5054 	 * Only old or non-multiqueue devices have the PBA register
   5055 	 * XXX Need special handling for 82575.
   5056 	 */
   5057 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5058 	    || (sc->sc_type == WM_T_82575))
   5059 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   5060 
   5061 	/* Prevent the PCI-E bus from sticking */
   5062 	if (sc->sc_flags & WM_F_PCIE) {
   5063 		int timeout = 800;
   5064 
   5065 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   5066 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5067 
   5068 		while (timeout--) {
   5069 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   5070 			    == 0)
   5071 				break;
   5072 			delay(100);
   5073 		}
   5074 		if (timeout == 0)
   5075 			device_printf(sc->sc_dev,
   5076 			    "failed to disable bus mastering\n");
   5077 	}
   5078 
   5079 	/* Set the completion timeout for interface */
   5080 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   5081 	    || (sc->sc_type == WM_T_82580)
   5082 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5083 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   5084 		wm_set_pcie_completion_timeout(sc);
   5085 
   5086 	/* Clear interrupt */
   5087 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5088 	if (wm_is_using_msix(sc)) {
   5089 		if (sc->sc_type != WM_T_82574) {
   5090 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5091 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5092 		} else
   5093 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5094 	}
   5095 
   5096 	/* Stop the transmit and receive processes. */
   5097 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5098 	sc->sc_rctl &= ~RCTL_EN;
   5099 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   5100 	CSR_WRITE_FLUSH(sc);
   5101 
   5102 	/* XXX set_tbi_sbp_82543() */
   5103 
   5104 	delay(10*1000);
   5105 
   5106 	/* Must acquire the MDIO ownership before MAC reset */
   5107 	switch (sc->sc_type) {
   5108 	case WM_T_82573:
   5109 	case WM_T_82574:
   5110 	case WM_T_82583:
   5111 		error = wm_get_hw_semaphore_82573(sc);
   5112 		break;
   5113 	default:
   5114 		break;
   5115 	}
   5116 
   5117 	/*
   5118 	 * 82541 Errata 29? & 82547 Errata 28?
   5119 	 * See also the description about PHY_RST bit in CTRL register
   5120 	 * in 8254x_GBe_SDM.pdf.
   5121 	 */
   5122 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   5123 		CSR_WRITE(sc, WMREG_CTRL,
   5124 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   5125 		CSR_WRITE_FLUSH(sc);
   5126 		delay(5000);
   5127 	}
   5128 
   5129 	switch (sc->sc_type) {
   5130 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   5131 	case WM_T_82541:
   5132 	case WM_T_82541_2:
   5133 	case WM_T_82547:
   5134 	case WM_T_82547_2:
   5135 		/*
   5136 		 * On some chipsets, a reset through a memory-mapped write
   5137 		 * cycle can cause the chip to reset before completing the
   5138 		 * write cycle. This causes major headache that can be avoided
   5139 		 * by issuing the reset via indirect register writes through
   5140 		 * I/O space.
   5141 		 *
   5142 		 * So, if we successfully mapped the I/O BAR at attach time,
   5143 		 * use that. Otherwise, try our luck with a memory-mapped
   5144 		 * reset.
   5145 		 */
   5146 		if (sc->sc_flags & WM_F_IOH_VALID)
   5147 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   5148 		else
   5149 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   5150 		break;
   5151 	case WM_T_82545_3:
   5152 	case WM_T_82546_3:
   5153 		/* Use the shadow control register on these chips. */
   5154 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   5155 		break;
   5156 	case WM_T_80003:
   5157 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5158 		sc->phy.acquire(sc);
   5159 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5160 		sc->phy.release(sc);
   5161 		break;
   5162 	case WM_T_ICH8:
   5163 	case WM_T_ICH9:
   5164 	case WM_T_ICH10:
   5165 	case WM_T_PCH:
   5166 	case WM_T_PCH2:
   5167 	case WM_T_PCH_LPT:
   5168 	case WM_T_PCH_SPT:
   5169 	case WM_T_PCH_CNP:
   5170 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   5171 		if (wm_phy_resetisblocked(sc) == false) {
   5172 			/*
   5173 			 * Gate automatic PHY configuration by hardware on
   5174 			 * non-managed 82579
   5175 			 */
   5176 			if ((sc->sc_type == WM_T_PCH2)
   5177 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   5178 				== 0))
   5179 				wm_gate_hw_phy_config_ich8lan(sc, true);
   5180 
   5181 			reg |= CTRL_PHY_RESET;
   5182 			phy_reset = 1;
   5183 		} else
   5184 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   5185 		sc->phy.acquire(sc);
   5186 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5187 		/* Don't insert a completion barrier when reset */
   5188 		delay(20*1000);
   5189 		mutex_exit(sc->sc_ich_phymtx);
   5190 		break;
   5191 	case WM_T_82580:
   5192 	case WM_T_I350:
   5193 	case WM_T_I354:
   5194 	case WM_T_I210:
   5195 	case WM_T_I211:
   5196 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5197 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   5198 			CSR_WRITE_FLUSH(sc);
   5199 		delay(5000);
   5200 		break;
   5201 	case WM_T_82542_2_0:
   5202 	case WM_T_82542_2_1:
   5203 	case WM_T_82543:
   5204 	case WM_T_82540:
   5205 	case WM_T_82545:
   5206 	case WM_T_82546:
   5207 	case WM_T_82571:
   5208 	case WM_T_82572:
   5209 	case WM_T_82573:
   5210 	case WM_T_82574:
   5211 	case WM_T_82575:
   5212 	case WM_T_82576:
   5213 	case WM_T_82583:
   5214 	default:
   5215 		/* Everything else can safely use the documented method. */
   5216 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5217 		break;
   5218 	}
   5219 
   5220 	/* Must release the MDIO ownership after MAC reset */
   5221 	switch (sc->sc_type) {
   5222 	case WM_T_82573:
   5223 	case WM_T_82574:
   5224 	case WM_T_82583:
   5225 		if (error == 0)
   5226 			wm_put_hw_semaphore_82573(sc);
   5227 		break;
   5228 	default:
   5229 		break;
   5230 	}
   5231 
   5232 	/* Set Phy Config Counter to 50msec */
   5233 	if (sc->sc_type == WM_T_PCH2) {
   5234 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5235 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5236 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5237 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5238 	}
   5239 
   5240 	if (phy_reset != 0)
   5241 		wm_get_cfg_done(sc);
   5242 
   5243 	/* Reload EEPROM */
   5244 	switch (sc->sc_type) {
   5245 	case WM_T_82542_2_0:
   5246 	case WM_T_82542_2_1:
   5247 	case WM_T_82543:
   5248 	case WM_T_82544:
   5249 		delay(10);
   5250 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5251 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5252 		CSR_WRITE_FLUSH(sc);
   5253 		delay(2000);
   5254 		break;
   5255 	case WM_T_82540:
   5256 	case WM_T_82545:
   5257 	case WM_T_82545_3:
   5258 	case WM_T_82546:
   5259 	case WM_T_82546_3:
   5260 		delay(5*1000);
   5261 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5262 		break;
   5263 	case WM_T_82541:
   5264 	case WM_T_82541_2:
   5265 	case WM_T_82547:
   5266 	case WM_T_82547_2:
   5267 		delay(20000);
   5268 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5269 		break;
   5270 	case WM_T_82571:
   5271 	case WM_T_82572:
   5272 	case WM_T_82573:
   5273 	case WM_T_82574:
   5274 	case WM_T_82583:
   5275 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5276 			delay(10);
   5277 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5278 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5279 			CSR_WRITE_FLUSH(sc);
   5280 		}
   5281 		/* check EECD_EE_AUTORD */
   5282 		wm_get_auto_rd_done(sc);
   5283 		/*
   5284 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5285 		 * is set.
   5286 		 */
   5287 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5288 		    || (sc->sc_type == WM_T_82583))
   5289 			delay(25*1000);
   5290 		break;
   5291 	case WM_T_82575:
   5292 	case WM_T_82576:
   5293 	case WM_T_82580:
   5294 	case WM_T_I350:
   5295 	case WM_T_I354:
   5296 	case WM_T_I210:
   5297 	case WM_T_I211:
   5298 	case WM_T_80003:
   5299 		/* check EECD_EE_AUTORD */
   5300 		wm_get_auto_rd_done(sc);
   5301 		break;
   5302 	case WM_T_ICH8:
   5303 	case WM_T_ICH9:
   5304 	case WM_T_ICH10:
   5305 	case WM_T_PCH:
   5306 	case WM_T_PCH2:
   5307 	case WM_T_PCH_LPT:
   5308 	case WM_T_PCH_SPT:
   5309 	case WM_T_PCH_CNP:
   5310 		break;
   5311 	default:
   5312 		panic("%s: unknown type\n", __func__);
   5313 	}
   5314 
   5315 	/* Check whether EEPROM is present or not */
   5316 	switch (sc->sc_type) {
   5317 	case WM_T_82575:
   5318 	case WM_T_82576:
   5319 	case WM_T_82580:
   5320 	case WM_T_I350:
   5321 	case WM_T_I354:
   5322 	case WM_T_ICH8:
   5323 	case WM_T_ICH9:
   5324 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5325 			/* Not found */
   5326 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5327 			if (sc->sc_type == WM_T_82575)
   5328 				wm_reset_init_script_82575(sc);
   5329 		}
   5330 		break;
   5331 	default:
   5332 		break;
   5333 	}
   5334 
   5335 	if (phy_reset != 0)
   5336 		wm_phy_post_reset(sc);
   5337 
   5338 	if ((sc->sc_type == WM_T_82580)
   5339 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5340 		/* Clear global device reset status bit */
   5341 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5342 	}
   5343 
   5344 	/* Clear any pending interrupt events. */
   5345 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5346 	reg = CSR_READ(sc, WMREG_ICR);
   5347 	if (wm_is_using_msix(sc)) {
   5348 		if (sc->sc_type != WM_T_82574) {
   5349 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5350 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5351 		} else
   5352 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5353 	}
   5354 
   5355 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5356 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5357 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5358 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5359 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5360 		reg |= KABGTXD_BGSQLBIAS;
   5361 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5362 	}
   5363 
   5364 	/* Reload sc_ctrl */
   5365 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5366 
   5367 	wm_set_eee(sc);
   5368 
   5369 	/*
   5370 	 * For PCH, this write will make sure that any noise will be detected
   5371 	 * as a CRC error and be dropped rather than show up as a bad packet
   5372 	 * to the DMA engine
   5373 	 */
   5374 	if (sc->sc_type == WM_T_PCH)
   5375 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5376 
   5377 	if (sc->sc_type >= WM_T_82544)
   5378 		CSR_WRITE(sc, WMREG_WUC, 0);
   5379 
   5380 	if (sc->sc_type < WM_T_82575)
   5381 		wm_disable_aspm(sc); /* Workaround for some chips */
   5382 
   5383 	wm_reset_mdicnfg_82580(sc);
   5384 
   5385 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5386 		wm_pll_workaround_i210(sc);
   5387 
   5388 	if (sc->sc_type == WM_T_80003) {
   5389 		/* Default to TRUE to enable the MDIC W/A */
   5390 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5391 
   5392 		rv = wm_kmrn_readreg(sc,
   5393 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5394 		if (rv == 0) {
   5395 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5396 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5397 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5398 			else
   5399 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5400 		}
   5401 	}
   5402 }
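
/*
 * Rough order of operations in wm_reset() above, for reference: size the
 * packet buffer (PBA), disable GIO master accesses on PCIe, mask
 * interrupts, stop the TX/RX units, take the MDIO semaphore where
 * required, assert CTRL_RST via the chip-specific method, wait for the
 * EEPROM/NVM reload, then clear any pending interrupt events and
 * re-apply the chip-specific workarounds.
 */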
   5403 
   5404 /*
   5405  * wm_add_rxbuf:
   5406  *
    5407  *	Add a receive buffer to the indicated descriptor.
   5408  */
   5409 static int
   5410 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5411 {
   5412 	struct wm_softc *sc = rxq->rxq_sc;
   5413 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5414 	struct mbuf *m;
   5415 	int error;
   5416 
   5417 	KASSERT(mutex_owned(rxq->rxq_lock));
   5418 
   5419 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5420 	if (m == NULL)
   5421 		return ENOBUFS;
   5422 
   5423 	MCLGET(m, M_DONTWAIT);
   5424 	if ((m->m_flags & M_EXT) == 0) {
   5425 		m_freem(m);
   5426 		return ENOBUFS;
   5427 	}
   5428 
   5429 	if (rxs->rxs_mbuf != NULL)
   5430 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5431 
   5432 	rxs->rxs_mbuf = m;
   5433 
   5434 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5435 	/*
   5436 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5437 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5438 	 */
   5439 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5440 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5441 	if (error) {
   5442 		/* XXX XXX XXX */
   5443 		aprint_error_dev(sc->sc_dev,
   5444 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5445 		panic("wm_add_rxbuf");
   5446 	}
   5447 
   5448 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5449 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5450 
   5451 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5452 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5453 			wm_init_rxdesc(rxq, idx);
   5454 	} else
   5455 		wm_init_rxdesc(rxq, idx);
   5456 
   5457 	return 0;
   5458 }
   5459 
   5460 /*
   5461  * wm_rxdrain:
   5462  *
   5463  *	Drain the receive queue.
   5464  */
   5465 static void
   5466 wm_rxdrain(struct wm_rxqueue *rxq)
   5467 {
   5468 	struct wm_softc *sc = rxq->rxq_sc;
   5469 	struct wm_rxsoft *rxs;
   5470 	int i;
   5471 
   5472 	KASSERT(mutex_owned(rxq->rxq_lock));
   5473 
   5474 	for (i = 0; i < WM_NRXDESC; i++) {
   5475 		rxs = &rxq->rxq_soft[i];
   5476 		if (rxs->rxs_mbuf != NULL) {
   5477 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5478 			m_freem(rxs->rxs_mbuf);
   5479 			rxs->rxs_mbuf = NULL;
   5480 		}
   5481 	}
   5482 }
   5483 
   5484 /*
   5485  * Setup registers for RSS.
   5486  *
   5487  * XXX not yet VMDq support
   5488  */
   5489 static void
   5490 wm_init_rss(struct wm_softc *sc)
   5491 {
   5492 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5493 	int i;
   5494 
   5495 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5496 
   5497 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5498 		unsigned int qid, reta_ent;
   5499 
   5500 		qid  = i % sc->sc_nqueues;
   5501 		switch (sc->sc_type) {
   5502 		case WM_T_82574:
   5503 			reta_ent = __SHIFTIN(qid,
   5504 			    RETA_ENT_QINDEX_MASK_82574);
   5505 			break;
   5506 		case WM_T_82575:
   5507 			reta_ent = __SHIFTIN(qid,
   5508 			    RETA_ENT_QINDEX1_MASK_82575);
   5509 			break;
   5510 		default:
   5511 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5512 			break;
   5513 		}
   5514 
   5515 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5516 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5517 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5518 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5519 	}
   5520 
   5521 	rss_getkey((uint8_t *)rss_key);
   5522 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5523 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5524 
   5525 	if (sc->sc_type == WM_T_82574)
   5526 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5527 	else
   5528 		mrqc = MRQC_ENABLE_RSS_MQ;
   5529 
   5530 	/*
   5531 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5532 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5533 	 */
   5534 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5535 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5536 #if 0
   5537 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5538 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5539 #endif
   5540 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5541 
   5542 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5543 }
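
/*
 * Illustrative example: with sc_nqueues = 4, the loop in wm_init_rss()
 * above programs qid = i % 4 into the redirection table, so the RETA
 * entries cycle 0, 1, 2, 3, 0, 1, ... and the RSS hash of each received
 * packet selects one of the four RX queues accordingly.
 */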
   5544 
   5545 /*
    5546  * Adjust the numbers of TX and RX queues which the system actually uses.
    5547  *
    5548  * The numbers are affected by the parameters below.
    5549  *     - The number of hardware queues
   5550  *     - The number of MSI-X vectors (= "nvectors" argument)
   5551  *     - ncpu
   5552  */
   5553 static void
   5554 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5555 {
   5556 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5557 
   5558 	if (nvectors < 2) {
   5559 		sc->sc_nqueues = 1;
   5560 		return;
   5561 	}
   5562 
   5563 	switch (sc->sc_type) {
   5564 	case WM_T_82572:
   5565 		hw_ntxqueues = 2;
   5566 		hw_nrxqueues = 2;
   5567 		break;
   5568 	case WM_T_82574:
   5569 		hw_ntxqueues = 2;
   5570 		hw_nrxqueues = 2;
   5571 		break;
   5572 	case WM_T_82575:
   5573 		hw_ntxqueues = 4;
   5574 		hw_nrxqueues = 4;
   5575 		break;
   5576 	case WM_T_82576:
   5577 		hw_ntxqueues = 16;
   5578 		hw_nrxqueues = 16;
   5579 		break;
   5580 	case WM_T_82580:
   5581 	case WM_T_I350:
   5582 	case WM_T_I354:
   5583 		hw_ntxqueues = 8;
   5584 		hw_nrxqueues = 8;
   5585 		break;
   5586 	case WM_T_I210:
   5587 		hw_ntxqueues = 4;
   5588 		hw_nrxqueues = 4;
   5589 		break;
   5590 	case WM_T_I211:
   5591 		hw_ntxqueues = 2;
   5592 		hw_nrxqueues = 2;
   5593 		break;
   5594 		/*
   5595 		 * The below Ethernet controllers do not support MSI-X;
   5596 		 * this driver doesn't let them use multiqueue.
   5597 		 *     - WM_T_80003
   5598 		 *     - WM_T_ICH8
   5599 		 *     - WM_T_ICH9
   5600 		 *     - WM_T_ICH10
   5601 		 *     - WM_T_PCH
   5602 		 *     - WM_T_PCH2
   5603 		 *     - WM_T_PCH_LPT
   5604 		 */
   5605 	default:
   5606 		hw_ntxqueues = 1;
   5607 		hw_nrxqueues = 1;
   5608 		break;
   5609 	}
   5610 
   5611 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5612 
   5613 	/*
    5614 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    5615 	 * limit the number of queues actually used.
   5616 	 */
   5617 	if (nvectors < hw_nqueues + 1)
   5618 		sc->sc_nqueues = nvectors - 1;
   5619 	else
   5620 		sc->sc_nqueues = hw_nqueues;
   5621 
   5622 	/*
    5623 	 * Since more queues than CPUs cannot improve scaling, we limit
    5624 	 * the number of queues actually used.
   5625 	 */
   5626 	if (ncpu < sc->sc_nqueues)
   5627 		sc->sc_nqueues = ncpu;
   5628 }
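
/*
 * Worked example (illustrative): an 82576 exposes 16 TX and 16 RX
 * hardware queues, so hw_nqueues = 16. If it attaches with nvectors = 5
 * on an 8-CPU machine, wm_adjust_qnum() above clamps to nvectors - 1 = 4
 * (one vector is reserved for the link interrupt), and since 4 <= ncpu,
 * sc_nqueues ends up as 4.
 */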
   5629 
   5630 static inline bool
   5631 wm_is_using_msix(struct wm_softc *sc)
   5632 {
   5633 
   5634 	return (sc->sc_nintrs > 1);
   5635 }
   5636 
   5637 static inline bool
   5638 wm_is_using_multiqueue(struct wm_softc *sc)
   5639 {
   5640 
   5641 	return (sc->sc_nqueues > 1);
   5642 }
   5643 
   5644 static int
   5645 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
   5646 {
   5647 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5648 
   5649 	wmq->wmq_id = qidx;
   5650 	wmq->wmq_intr_idx = intr_idx;
   5651 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
   5652 	    wm_handle_queue, wmq);
   5653 	if (wmq->wmq_si != NULL)
   5654 		return 0;
   5655 
   5656 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5657 	    wmq->wmq_id);
   5658 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5659 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5660 	return ENOMEM;
   5661 }
   5662 
   5663 /*
   5664  * Both single interrupt MSI and INTx can use this function.
   5665  */
   5666 static int
   5667 wm_setup_legacy(struct wm_softc *sc)
   5668 {
   5669 	pci_chipset_tag_t pc = sc->sc_pc;
   5670 	const char *intrstr = NULL;
   5671 	char intrbuf[PCI_INTRSTR_LEN];
   5672 	int error;
   5673 
   5674 	error = wm_alloc_txrx_queues(sc);
   5675 	if (error) {
   5676 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5677 		    error);
   5678 		return ENOMEM;
   5679 	}
   5680 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5681 	    sizeof(intrbuf));
   5682 #ifdef WM_MPSAFE
   5683 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5684 #endif
   5685 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5686 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5687 	if (sc->sc_ihs[0] == NULL) {
   5688 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5689 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5690 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5691 		return ENOMEM;
   5692 	}
   5693 
   5694 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5695 	sc->sc_nintrs = 1;
   5696 
   5697 	return wm_softint_establish_queue(sc, 0, 0);
   5698 }
   5699 
   5700 static int
   5701 wm_setup_msix(struct wm_softc *sc)
   5702 {
   5703 	void *vih;
   5704 	kcpuset_t *affinity;
   5705 	int qidx, error, intr_idx, txrx_established;
   5706 	pci_chipset_tag_t pc = sc->sc_pc;
   5707 	const char *intrstr = NULL;
   5708 	char intrbuf[PCI_INTRSTR_LEN];
   5709 	char intr_xname[INTRDEVNAMEBUF];
   5710 
   5711 	if (sc->sc_nqueues < ncpu) {
   5712 		/*
    5713 		 * To avoid interference with other devices' interrupts,
    5714 		 * the affinity of the Tx/Rx interrupts starts from CPU#1.
   5715 		 */
   5716 		sc->sc_affinity_offset = 1;
   5717 	} else {
   5718 		/*
    5719 		 * In this case, this device uses all CPUs. So, we unify the
    5720 		 * affinity cpu_index with the MSI-X vector number for readability.
   5721 		 */
   5722 		sc->sc_affinity_offset = 0;
   5723 	}
   5724 
   5725 	error = wm_alloc_txrx_queues(sc);
   5726 	if (error) {
   5727 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5728 		    error);
   5729 		return ENOMEM;
   5730 	}
   5731 
   5732 	kcpuset_create(&affinity, false);
   5733 	intr_idx = 0;
   5734 
   5735 	/*
   5736 	 * TX and RX
   5737 	 */
   5738 	txrx_established = 0;
   5739 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5740 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5741 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5742 
   5743 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5744 		    sizeof(intrbuf));
   5745 #ifdef WM_MPSAFE
   5746 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5747 		    PCI_INTR_MPSAFE, true);
   5748 #endif
   5749 		memset(intr_xname, 0, sizeof(intr_xname));
   5750 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5751 		    device_xname(sc->sc_dev), qidx);
   5752 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5753 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5754 		if (vih == NULL) {
   5755 			aprint_error_dev(sc->sc_dev,
    5756 			    "unable to establish MSI-X (for TX and RX)%s%s\n",
   5757 			    intrstr ? " at " : "",
   5758 			    intrstr ? intrstr : "");
   5759 
   5760 			goto fail;
   5761 		}
   5762 		kcpuset_zero(affinity);
   5763 		/* Round-robin affinity */
   5764 		kcpuset_set(affinity, affinity_to);
   5765 		error = interrupt_distribute(vih, affinity, NULL);
   5766 		if (error == 0) {
   5767 			aprint_normal_dev(sc->sc_dev,
   5768 			    "for TX and RX interrupting at %s affinity to %u\n",
   5769 			    intrstr, affinity_to);
   5770 		} else {
   5771 			aprint_normal_dev(sc->sc_dev,
   5772 			    "for TX and RX interrupting at %s\n", intrstr);
   5773 		}
   5774 		sc->sc_ihs[intr_idx] = vih;
   5775 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
   5776 			goto fail;
   5777 		txrx_established++;
   5778 		intr_idx++;
   5779 	}
   5780 
   5781 	/* LINK */
   5782 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5783 	    sizeof(intrbuf));
   5784 #ifdef WM_MPSAFE
   5785 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5786 #endif
   5787 	memset(intr_xname, 0, sizeof(intr_xname));
   5788 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5789 	    device_xname(sc->sc_dev));
   5790 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5791 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5792 	if (vih == NULL) {
   5793 		aprint_error_dev(sc->sc_dev,
    5794 		    "unable to establish MSI-X (for LINK)%s%s\n",
   5795 		    intrstr ? " at " : "",
   5796 		    intrstr ? intrstr : "");
   5797 
   5798 		goto fail;
   5799 	}
   5800 	/* Keep default affinity to LINK interrupt */
   5801 	aprint_normal_dev(sc->sc_dev,
   5802 	    "for LINK interrupting at %s\n", intrstr);
   5803 	sc->sc_ihs[intr_idx] = vih;
   5804 	sc->sc_link_intr_idx = intr_idx;
   5805 
   5806 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5807 	kcpuset_destroy(affinity);
   5808 	return 0;
   5809 
   5810  fail:
   5811 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5812 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5813 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5814 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5815 	}
   5816 
   5817 	kcpuset_destroy(affinity);
   5818 	return ENOMEM;
   5819 }
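
/*
 * Resulting MSI-X vector layout (sketch): vectors 0 .. sc_nqueues - 1
 * each service one TX/RX queue pair through wm_txrxintr_msix(), and
 * vector sc_nqueues (recorded in sc_link_intr_idx) services link state
 * changes through wm_linkintr_msix(), for sc_nintrs = sc_nqueues + 1.
 */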
   5820 
   5821 static void
   5822 wm_unset_stopping_flags(struct wm_softc *sc)
   5823 {
   5824 	int i;
   5825 
   5826 	KASSERT(WM_CORE_LOCKED(sc));
   5827 
   5828 	/* Must unset stopping flags in ascending order. */
   5829 	for (i = 0; i < sc->sc_nqueues; i++) {
   5830 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5831 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5832 
   5833 		mutex_enter(txq->txq_lock);
   5834 		txq->txq_stopping = false;
   5835 		mutex_exit(txq->txq_lock);
   5836 
   5837 		mutex_enter(rxq->rxq_lock);
   5838 		rxq->rxq_stopping = false;
   5839 		mutex_exit(rxq->rxq_lock);
   5840 	}
   5841 
   5842 	sc->sc_core_stopping = false;
   5843 }
   5844 
   5845 static void
   5846 wm_set_stopping_flags(struct wm_softc *sc)
   5847 {
   5848 	int i;
   5849 
   5850 	KASSERT(WM_CORE_LOCKED(sc));
   5851 
   5852 	sc->sc_core_stopping = true;
   5853 
   5854 	/* Must set stopping flags in ascending order. */
   5855 	for (i = 0; i < sc->sc_nqueues; i++) {
   5856 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5857 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5858 
   5859 		mutex_enter(rxq->rxq_lock);
   5860 		rxq->rxq_stopping = true;
   5861 		mutex_exit(rxq->rxq_lock);
   5862 
   5863 		mutex_enter(txq->txq_lock);
   5864 		txq->txq_stopping = true;
   5865 		mutex_exit(txq->txq_lock);
   5866 	}
   5867 }
   5868 
   5869 /*
   5870  * Write interrupt interval value to ITR or EITR
   5871  */
   5872 static void
   5873 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5874 {
   5875 
   5876 	if (!wmq->wmq_set_itr)
   5877 		return;
   5878 
   5879 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5880 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5881 
   5882 		/*
    5883 		 * The 82575 doesn't have the CNT_INGR field,
    5884 		 * so overwrite the counter field in software.
   5885 		 */
   5886 		if (sc->sc_type == WM_T_82575)
   5887 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5888 		else
   5889 			eitr |= EITR_CNT_INGR;
   5890 
   5891 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5892 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5893 		/*
    5894 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5895 		 * the multiqueue function with MSI-X.
   5896 		 */
   5897 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5898 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5899 	} else {
   5900 		KASSERT(wmq->wmq_id == 0);
   5901 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5902 	}
   5903 
   5904 	wmq->wmq_set_itr = false;
   5905 }
   5906 
   5907 /*
   5908  * TODO
    5909  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5910  * but it does not fit wm(4), so AIM stays disabled until we find an
    5911  * appropriate ITR calculation.
   5912  */
   5913 /*
    5914  * Calculate the interrupt interval value which wm_itrs_writereg() will
    5915  * write to the register. This function does not write the ITR/EITR register.
   5916  */
   5917 static void
   5918 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5919 {
   5920 #ifdef NOTYET
   5921 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5922 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5923 	uint32_t avg_size = 0;
   5924 	uint32_t new_itr;
   5925 
   5926 	if (rxq->rxq_packets)
   5927 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5928 	if (txq->txq_packets)
   5929 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5930 
   5931 	if (avg_size == 0) {
   5932 		new_itr = 450; /* restore default value */
   5933 		goto out;
   5934 	}
   5935 
   5936 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5937 	avg_size += 24;
   5938 
   5939 	/* Don't starve jumbo frames */
   5940 	avg_size = uimin(avg_size, 3000);
   5941 
   5942 	/* Give a little boost to mid-size frames */
   5943 	if ((avg_size > 300) && (avg_size < 1200))
   5944 		new_itr = avg_size / 3;
   5945 	else
   5946 		new_itr = avg_size / 2;
   5947 
   5948 out:
   5949 	/*
    5950 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5951 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5952 	 */
   5953 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5954 		new_itr *= 4;
   5955 
   5956 	if (new_itr != wmq->wmq_itr) {
   5957 		wmq->wmq_itr = new_itr;
   5958 		wmq->wmq_set_itr = true;
   5959 	} else
   5960 		wmq->wmq_set_itr = false;
   5961 
   5962 	rxq->rxq_packets = 0;
   5963 	rxq->rxq_bytes = 0;
   5964 	txq->txq_packets = 0;
   5965 	txq->txq_bytes = 0;
   5966 #endif
   5967 }
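
/*
 * Worked example for the disabled heuristic above: an average packet of
 * 876 bytes becomes 900 after the 24-byte CRC/preamble/gap adjustment;
 * since 300 < 900 < 1200, the mid-size boost branch yields new_itr =
 * 900 / 3 = 300, which is then multiplied by 4 except on the 82575.
 */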
   5968 
   5969 static void
   5970 wm_init_sysctls(struct wm_softc *sc)
   5971 {
   5972 	struct sysctllog **log;
   5973 	const struct sysctlnode *rnode, *qnode, *cnode;
   5974 	int i, rv;
   5975 	const char *dvname;
   5976 
   5977 	log = &sc->sc_sysctllog;
   5978 	dvname = device_xname(sc->sc_dev);
   5979 
   5980 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5981 	    0, CTLTYPE_NODE, dvname,
   5982 	    SYSCTL_DESCR("wm information and settings"),
   5983 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5984 	if (rv != 0)
   5985 		goto err;
   5986 
   5987 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   5988 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   5989 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5990 	if (rv != 0)
   5991 		goto teardown;
   5992 
   5993 	for (i = 0; i < sc->sc_nqueues; i++) {
   5994 		struct wm_queue *wmq = &sc->sc_queue[i];
   5995 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5996 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5997 
   5998 		snprintf(sc->sc_queue[i].sysctlname,
   5999 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
   6000 
   6001 		if (sysctl_createv(log, 0, &rnode, &qnode,
   6002 		    0, CTLTYPE_NODE,
   6003 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
   6004 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   6005 			break;
   6006 
   6007 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6008 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6009 		    "txq_free", SYSCTL_DESCR("TX queue free"),
   6010 		    NULL, 0, &txq->txq_free,
   6011 		    0, CTL_CREATE, CTL_EOL) != 0)
   6012 			break;
   6013 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6014 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6015 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
   6016 		    wm_sysctl_tdh_handler, 0, (void *)txq,
   6017 		    0, CTL_CREATE, CTL_EOL) != 0)
   6018 			break;
   6019 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6020 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6021 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
   6022 		    wm_sysctl_tdt_handler, 0, (void *)txq,
   6023 		    0, CTL_CREATE, CTL_EOL) != 0)
   6024 			break;
   6025 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6026 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6027 		    "txq_next", SYSCTL_DESCR("TX queue next"),
   6028 		    NULL, 0, &txq->txq_next,
   6029 		    0, CTL_CREATE, CTL_EOL) != 0)
   6030 			break;
   6031 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6032 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6033 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
   6034 		    NULL, 0, &txq->txq_sfree,
   6035 		    0, CTL_CREATE, CTL_EOL) != 0)
   6036 			break;
   6037 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6038 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6039 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
   6040 		    NULL, 0, &txq->txq_snext,
   6041 		    0, CTL_CREATE, CTL_EOL) != 0)
   6042 			break;
   6043 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6044 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6045 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
   6046 		    NULL, 0, &txq->txq_sdirty,
   6047 		    0, CTL_CREATE, CTL_EOL) != 0)
   6048 			break;
   6049 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6050 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6051 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
   6052 		    NULL, 0, &txq->txq_flags,
   6053 		    0, CTL_CREATE, CTL_EOL) != 0)
   6054 			break;
   6055 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6056 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6057 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
   6058 		    NULL, 0, &txq->txq_stopping,
   6059 		    0, CTL_CREATE, CTL_EOL) != 0)
   6060 			break;
   6061 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6062 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   6063 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
   6064 		    NULL, 0, &txq->txq_sending,
   6065 		    0, CTL_CREATE, CTL_EOL) != 0)
   6066 			break;
   6067 
   6068 		if (sysctl_createv(log, 0, &qnode, &cnode,
   6069 		    CTLFLAG_READONLY, CTLTYPE_INT,
   6070 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
   6071 		    NULL, 0, &rxq->rxq_ptr,
   6072 		    0, CTL_CREATE, CTL_EOL) != 0)
   6073 			break;
   6074 	}
   6075 
   6076 #ifdef WM_DEBUG
   6077 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   6078 	    CTLTYPE_INT, "debug_flags",
   6079 	    SYSCTL_DESCR(
   6080 		    "Debug flags:\n"	\
   6081 		    "\t0x01 LINK\n"	\
   6082 		    "\t0x02 TX\n"	\
   6083 		    "\t0x04 RX\n"	\
   6084 		    "\t0x08 GMII\n"	\
   6085 		    "\t0x10 MANAGE\n"	\
   6086 		    "\t0x20 NVM\n"	\
   6087 		    "\t0x40 INIT\n"	\
   6088 		    "\t0x80 LOCK"),
   6089 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
   6090 	if (rv != 0)
   6091 		goto teardown;
   6092 #endif
   6093 
   6094 	return;
   6095 
   6096 teardown:
   6097 	sysctl_teardown(log);
   6098 err:
   6099 	sc->sc_sysctllog = NULL;
   6100 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   6101 	    __func__, rv);
   6102 }
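
/*
 * Example (with a hypothetical device name of "wm0"): the nodes created
 * above show up as hw.wm0.txrx_workqueue, hw.wm0.q0.txq_free,
 * hw.wm0.q0.rxq_ptr and so on, and can be inspected with sysctl(8),
 * e.g. "sysctl hw.wm0.q0.txq_free".
 */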
   6103 
   6104 /*
   6105  * wm_init:		[ifnet interface function]
   6106  *
   6107  *	Initialize the interface.
   6108  */
   6109 static int
   6110 wm_init(struct ifnet *ifp)
   6111 {
   6112 	struct wm_softc *sc = ifp->if_softc;
   6113 	int ret;
   6114 
   6115 	WM_CORE_LOCK(sc);
   6116 	ret = wm_init_locked(ifp);
   6117 	WM_CORE_UNLOCK(sc);
   6118 
   6119 	return ret;
   6120 }
   6121 
   6122 static int
   6123 wm_init_locked(struct ifnet *ifp)
   6124 {
   6125 	struct wm_softc *sc = ifp->if_softc;
   6126 	struct ethercom *ec = &sc->sc_ethercom;
   6127 	int i, j, trynum, error = 0;
   6128 	uint32_t reg, sfp_mask = 0;
   6129 
   6130 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6131 		device_xname(sc->sc_dev), __func__));
   6132 	KASSERT(WM_CORE_LOCKED(sc));
   6133 
   6134 	/*
    6135 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    6136 	 * There is a small but measurable benefit to avoiding the adjustment
   6137 	 * of the descriptor so that the headers are aligned, for normal mtu,
   6138 	 * on such platforms.  One possibility is that the DMA itself is
   6139 	 * slightly more efficient if the front of the entire packet (instead
   6140 	 * of the front of the headers) is aligned.
   6141 	 *
   6142 	 * Note we must always set align_tweak to 0 if we are using
   6143 	 * jumbo frames.
   6144 	 */
   6145 #ifdef __NO_STRICT_ALIGNMENT
   6146 	sc->sc_align_tweak = 0;
   6147 #else
   6148 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   6149 		sc->sc_align_tweak = 0;
   6150 	else
   6151 		sc->sc_align_tweak = 2;
   6152 #endif /* __NO_STRICT_ALIGNMENT */
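	/*
	 * A tweak of 2 makes the 14-byte Ethernet header end on a 4-byte
	 * boundary, so the IP header that follows it in the receive
	 * buffer is 32-bit aligned.
	 */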
   6153 
   6154 	/* Cancel any pending I/O. */
   6155 	wm_stop_locked(ifp, false, false);
   6156 
   6157 	/* Update statistics before reset */
   6158 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   6159 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   6160 
   6161 	/* >= PCH_SPT hardware workaround before reset. */
   6162 	if (sc->sc_type >= WM_T_PCH_SPT)
   6163 		wm_flush_desc_rings(sc);
   6164 
   6165 	/* Reset the chip to a known state. */
   6166 	wm_reset(sc);
   6167 
   6168 	/*
	 * AMT-based hardware can now take control from firmware.
	 * Do this after reset.
   6171 	 */
   6172 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   6173 		wm_get_hw_control(sc);
   6174 
   6175 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   6176 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   6177 		wm_legacy_irq_quirk_spt(sc);
   6178 
   6179 	/* Init hardware bits */
   6180 	wm_initialize_hardware_bits(sc);
   6181 
   6182 	/* Reset the PHY. */
   6183 	if (sc->sc_flags & WM_F_HAS_MII)
   6184 		wm_gmii_reset(sc);
   6185 
   6186 	if (sc->sc_type >= WM_T_ICH8) {
   6187 		reg = CSR_READ(sc, WMREG_GCR);
		/*
		 * The ICH8's no-snoop bits have the opposite polarity.
		 * Set to snoop by default after reset.
		 */
   6192 		if (sc->sc_type == WM_T_ICH8)
   6193 			reg |= GCR_NO_SNOOP_ALL;
   6194 		else
   6195 			reg &= ~GCR_NO_SNOOP_ALL;
   6196 		CSR_WRITE(sc, WMREG_GCR, reg);
   6197 	}
   6198 
   6199 	if ((sc->sc_type >= WM_T_ICH8)
   6200 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   6201 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   6203 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6204 		reg |= CTRL_EXT_RO_DIS;
   6205 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6206 	}
   6207 
   6208 	/* Calculate (E)ITR value */
   6209 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
		/*
		 * Set NEWQUEUE's EITR (except for the 82575).
		 * The 82575's EITR should be set to the same throttling
		 * value as the other old controllers' ITR because the
		 * interrupt/sec calculation is the same, that is,
		 * 1,000,000,000 / (N * 256).
		 *
		 * The 82574's EITR should be set to the same throttling
		 * value as its ITR.
		 *
		 * For N interrupts/sec, set this value to:
		 * 1,000,000 / N, in contrast to the ITR throttling value.
		 */
   6221 		sc->sc_itr_init = 450;
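		/* i.e. 1,000,000 / 450 ~= 2222 interrupts/sec. */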
   6222 	} else if (sc->sc_type >= WM_T_82543) {
   6223 		/*
   6224 		 * Set up the interrupt throttling register (units of 256ns)
   6225 		 * Note that a footnote in Intel's documentation says this
   6226 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   6227 		 * or 10Mbit mode.  Empirically, it appears to be the case
   6228 		 * that that is also true for the 1024ns units of the other
   6229 		 * interrupt-related timer registers -- so, really, we ought
   6230 		 * to divide this value by 4 when the link speed is low.
   6231 		 *
   6232 		 * XXX implement this division at link speed change!
   6233 		 */
   6234 
   6235 		/*
   6236 		 * For N interrupts/sec, set this value to:
   6237 		 * 1,000,000,000 / (N * 256).  Note that we set the
   6238 		 * absolute and packet timer values to this value
   6239 		 * divided by 4 to get "simple timer" behavior.
   6240 		 */
   6241 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
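		/* 2604 = 1,000,000,000 / (1500 * 256), per the formula above. */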
   6242 	}
   6243 
   6244 	error = wm_init_txrx_queues(sc);
   6245 	if (error)
   6246 		goto out;
   6247 
   6248 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   6249 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   6250 	    (sc->sc_type >= WM_T_82575))
   6251 		wm_serdes_power_up_link_82575(sc);
   6252 
   6253 	/* Clear out the VLAN table -- we don't use it (yet). */
   6254 	CSR_WRITE(sc, WMREG_VET, 0);
   6255 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   6256 		trynum = 10; /* Due to hw errata */
   6257 	else
   6258 		trynum = 1;
   6259 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   6260 		for (j = 0; j < trynum; j++)
   6261 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   6262 
   6263 	/*
   6264 	 * Set up flow-control parameters.
   6265 	 *
   6266 	 * XXX Values could probably stand some tuning.
   6267 	 */
   6268 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   6269 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   6270 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
	    && (sc->sc_type != WM_T_PCH_SPT)
	    && (sc->sc_type != WM_T_PCH_CNP)) {
   6272 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   6273 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   6274 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   6275 	}
   6276 
   6277 	sc->sc_fcrtl = FCRTL_DFLT;
   6278 	if (sc->sc_type < WM_T_82543) {
   6279 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   6280 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   6281 	} else {
   6282 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   6283 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   6284 	}
   6285 
   6286 	if (sc->sc_type == WM_T_80003)
   6287 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   6288 	else
   6289 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   6290 
	/* wm_set_vlan() writes the control register. */
   6292 	wm_set_vlan(sc);
   6293 
   6294 	if (sc->sc_flags & WM_F_HAS_MII) {
   6295 		uint16_t kmreg;
   6296 
   6297 		switch (sc->sc_type) {
   6298 		case WM_T_80003:
   6299 		case WM_T_ICH8:
   6300 		case WM_T_ICH9:
   6301 		case WM_T_ICH10:
   6302 		case WM_T_PCH:
   6303 		case WM_T_PCH2:
   6304 		case WM_T_PCH_LPT:
   6305 		case WM_T_PCH_SPT:
   6306 		case WM_T_PCH_CNP:
			/*
			 * Set the MAC to wait the maximum time between each
			 * iteration and increase the max iterations when
			 * polling the PHY; this fixes erroneous timeouts at
			 * 10Mbps.
			 */
   6313 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6314 			    0xFFFF);
   6315 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6316 			    &kmreg);
   6317 			kmreg |= 0x3F;
   6318 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6319 			    kmreg);
   6320 			break;
   6321 		default:
   6322 			break;
   6323 		}
   6324 
   6325 		if (sc->sc_type == WM_T_80003) {
   6326 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6327 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6328 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6329 
   6330 			/* Bypass RX and TX FIFOs */
   6331 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6332 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6333 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6334 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6335 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6336 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6337 		}
   6338 	}
   6339 #if 0
   6340 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6341 #endif
   6342 
   6343 	/* Set up checksum offload parameters. */
   6344 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6345 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6346 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6347 		reg |= RXCSUM_IPOFL;
   6348 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6349 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6350 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6351 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6352 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6353 
	/* Set up the MSI-X related registers. */
   6355 	if (wm_is_using_msix(sc)) {
   6356 		uint32_t ivar, qintr_idx;
   6357 		struct wm_queue *wmq;
   6358 		unsigned int qid;
   6359 
   6360 		if (sc->sc_type == WM_T_82575) {
   6361 			/* Interrupt control */
   6362 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6363 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6364 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6365 
   6366 			/* TX and RX */
   6367 			for (i = 0; i < sc->sc_nqueues; i++) {
   6368 				wmq = &sc->sc_queue[i];
   6369 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6370 				    EITR_TX_QUEUE(wmq->wmq_id)
   6371 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6372 			}
   6373 			/* Link status */
   6374 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6375 			    EITR_OTHER);
   6376 		} else if (sc->sc_type == WM_T_82574) {
   6377 			/* Interrupt control */
   6378 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6379 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6380 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6381 
			/*
			 * Work around an issue with spurious interrupts
			 * in MSI-X mode.
			 * At wm_initialize_hardware_bits(), sc_nintrs has
			 * not been initialized yet, so re-initialize
			 * WMREG_RFCTL here.
			 */
   6388 			reg = CSR_READ(sc, WMREG_RFCTL);
   6389 			reg |= WMREG_RFCTL_ACKDIS;
   6390 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6391 
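			/*
			 * On the 82574, the vectors for all TX/RX queues and
			 * the link (other) interrupt are packed into the
			 * single IVAR register; each entry is tagged with
			 * IVAR_VALID_82574.
			 */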
   6392 			ivar = 0;
   6393 			/* TX and RX */
   6394 			for (i = 0; i < sc->sc_nqueues; i++) {
   6395 				wmq = &sc->sc_queue[i];
   6396 				qid = wmq->wmq_id;
   6397 				qintr_idx = wmq->wmq_intr_idx;
   6398 
   6399 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6400 				    IVAR_TX_MASK_Q_82574(qid));
   6401 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6402 				    IVAR_RX_MASK_Q_82574(qid));
   6403 			}
   6404 			/* Link status */
   6405 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6406 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6407 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6408 		} else {
   6409 			/* Interrupt control */
   6410 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6411 			    | GPIE_EIAME | GPIE_PBA);
   6412 
   6413 			switch (sc->sc_type) {
   6414 			case WM_T_82580:
   6415 			case WM_T_I350:
   6416 			case WM_T_I354:
   6417 			case WM_T_I210:
   6418 			case WM_T_I211:
   6419 				/* TX and RX */
   6420 				for (i = 0; i < sc->sc_nqueues; i++) {
   6421 					wmq = &sc->sc_queue[i];
   6422 					qid = wmq->wmq_id;
   6423 					qintr_idx = wmq->wmq_intr_idx;
   6424 
   6425 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6426 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6427 					ivar |= __SHIFTIN((qintr_idx
   6428 						| IVAR_VALID),
   6429 					    IVAR_TX_MASK_Q(qid));
   6430 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6431 					ivar |= __SHIFTIN((qintr_idx
   6432 						| IVAR_VALID),
   6433 					    IVAR_RX_MASK_Q(qid));
   6434 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6435 				}
   6436 				break;
   6437 			case WM_T_82576:
   6438 				/* TX and RX */
   6439 				for (i = 0; i < sc->sc_nqueues; i++) {
   6440 					wmq = &sc->sc_queue[i];
   6441 					qid = wmq->wmq_id;
   6442 					qintr_idx = wmq->wmq_intr_idx;
   6443 
   6444 					ivar = CSR_READ(sc,
   6445 					    WMREG_IVAR_Q_82576(qid));
   6446 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6447 					ivar |= __SHIFTIN((qintr_idx
   6448 						| IVAR_VALID),
   6449 					    IVAR_TX_MASK_Q_82576(qid));
   6450 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6451 					ivar |= __SHIFTIN((qintr_idx
   6452 						| IVAR_VALID),
   6453 					    IVAR_RX_MASK_Q_82576(qid));
   6454 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6455 					    ivar);
   6456 				}
   6457 				break;
   6458 			default:
   6459 				break;
   6460 			}
   6461 
   6462 			/* Link status */
   6463 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6464 			    IVAR_MISC_OTHER);
   6465 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6466 		}
   6467 
   6468 		if (wm_is_using_multiqueue(sc)) {
   6469 			wm_init_rss(sc);
   6470 
			/*
			 * NOTE: Receive full-packet checksum offload
			 * is mutually exclusive with multiqueue. However,
			 * this is not the same as TCP/IP checksums, which
			 * still work.
			 */
   6477 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6478 			reg |= RXCSUM_PCSD;
   6479 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6480 		}
   6481 	}
   6482 
   6483 	/* Set up the interrupt registers. */
   6484 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6485 
   6486 	/* Enable SFP module insertion interrupt if it's required */
   6487 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6488 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6489 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6490 		sfp_mask = ICR_GPI(0);
   6491 	}
   6492 
   6493 	if (wm_is_using_msix(sc)) {
   6494 		uint32_t mask;
   6495 		struct wm_queue *wmq;
   6496 
   6497 		switch (sc->sc_type) {
   6498 		case WM_T_82574:
   6499 			mask = 0;
   6500 			for (i = 0; i < sc->sc_nqueues; i++) {
   6501 				wmq = &sc->sc_queue[i];
   6502 				mask |= ICR_TXQ(wmq->wmq_id);
   6503 				mask |= ICR_RXQ(wmq->wmq_id);
   6504 			}
   6505 			mask |= ICR_OTHER;
   6506 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6507 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6508 			break;
   6509 		default:
   6510 			if (sc->sc_type == WM_T_82575) {
   6511 				mask = 0;
   6512 				for (i = 0; i < sc->sc_nqueues; i++) {
   6513 					wmq = &sc->sc_queue[i];
   6514 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6515 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6516 				}
   6517 				mask |= EITR_OTHER;
   6518 			} else {
   6519 				mask = 0;
   6520 				for (i = 0; i < sc->sc_nqueues; i++) {
   6521 					wmq = &sc->sc_queue[i];
   6522 					mask |= 1 << wmq->wmq_intr_idx;
   6523 				}
   6524 				mask |= 1 << sc->sc_link_intr_idx;
   6525 			}
   6526 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6527 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6528 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6529 
   6530 			/* For other interrupts */
   6531 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6532 			break;
   6533 		}
   6534 	} else {
   6535 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6536 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6537 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6538 	}
   6539 
   6540 	/* Set up the inter-packet gap. */
   6541 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6542 
   6543 	if (sc->sc_type >= WM_T_82543) {
   6544 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6545 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6546 			wm_itrs_writereg(sc, wmq);
   6547 		}
		/*
		 * Link interrupts occur much less frequently than TX and
		 * RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's if_igb
		 * does.
		 */
   6554 	}
   6555 
   6556 	/* Set the VLAN EtherType. */
   6557 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6558 
   6559 	/*
   6560 	 * Set up the transmit control register; we start out with
   6561 	 * a collision distance suitable for FDX, but update it when
   6562 	 * we resolve the media type.
   6563 	 */
   6564 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6565 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6566 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6567 	if (sc->sc_type >= WM_T_82571)
   6568 		sc->sc_tctl |= TCTL_MULR;
   6569 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6570 
   6571 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6572 		/* Write TDT after TCTL.EN is set. See the document. */
   6573 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6574 	}
   6575 
   6576 	if (sc->sc_type == WM_T_80003) {
   6577 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6578 		reg &= ~TCTL_EXT_GCEX_MASK;
   6579 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6580 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6581 	}
   6582 
   6583 	/* Set the media. */
   6584 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6585 		goto out;
   6586 
   6587 	/* Configure for OS presence */
   6588 	wm_init_manageability(sc);
   6589 
   6590 	/*
   6591 	 * Set up the receive control register; we actually program the
   6592 	 * register when we set the receive filter. Use multicast address
   6593 	 * offset type 0.
   6594 	 *
   6595 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6596 	 * don't enable that feature.
   6597 	 */
   6598 	sc->sc_mchash_type = 0;
   6599 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6600 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6601 
	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6603 	if (sc->sc_type == WM_T_82574)
   6604 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6605 
   6606 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
   6607 		sc->sc_rctl |= RCTL_SECRC;
   6608 
   6609 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6610 	    && (ifp->if_mtu > ETHERMTU)) {
   6611 		sc->sc_rctl |= RCTL_LPE;
   6612 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6613 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6614 	}
   6615 
   6616 	if (MCLBYTES == 2048)
   6617 		sc->sc_rctl |= RCTL_2k;
   6618 	else {
   6619 		if (sc->sc_type >= WM_T_82543) {
   6620 			switch (MCLBYTES) {
   6621 			case 4096:
   6622 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6623 				break;
   6624 			case 8192:
   6625 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6626 				break;
   6627 			case 16384:
   6628 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6629 				break;
   6630 			default:
   6631 				panic("wm_init: MCLBYTES %d unsupported",
   6632 				    MCLBYTES);
   6633 				break;
   6634 			}
   6635 		} else
   6636 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6637 	}
   6638 
   6639 	/* Enable ECC */
   6640 	switch (sc->sc_type) {
   6641 	case WM_T_82571:
   6642 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6643 		reg |= PBA_ECC_CORR_EN;
   6644 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6645 		break;
   6646 	case WM_T_PCH_LPT:
   6647 	case WM_T_PCH_SPT:
   6648 	case WM_T_PCH_CNP:
   6649 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6650 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6651 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6652 
   6653 		sc->sc_ctrl |= CTRL_MEHE;
   6654 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6655 		break;
   6656 	default:
   6657 		break;
   6658 	}
   6659 
   6660 	/*
   6661 	 * Set the receive filter.
   6662 	 *
   6663 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6664 	 * the setting of RCTL.EN in wm_set_filter()
   6665 	 */
   6666 	wm_set_filter(sc);
   6667 
	/* On the 82575 and later, set RDT only if RX is enabled. */
   6669 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6670 		int qidx;
   6671 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6672 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6673 			for (i = 0; i < WM_NRXDESC; i++) {
   6674 				mutex_enter(rxq->rxq_lock);
   6675 				wm_init_rxdesc(rxq, i);
				mutex_exit(rxq->rxq_lock);
			}
   6679 		}
   6680 	}
   6681 
   6682 	wm_unset_stopping_flags(sc);
   6683 
   6684 	/* Start the one second link check clock. */
   6685 	callout_schedule(&sc->sc_tick_ch, hz);
   6686 
   6687 	/* ...all done! */
   6688 	ifp->if_flags |= IFF_RUNNING;
   6689 
   6690  out:
   6691 	/* Save last flags for the callback */
   6692 	sc->sc_if_flags = ifp->if_flags;
   6693 	sc->sc_ec_capenable = ec->ec_capenable;
   6694 	if (error)
   6695 		log(LOG_ERR, "%s: interface not running\n",
   6696 		    device_xname(sc->sc_dev));
   6697 	return error;
   6698 }
   6699 
   6700 /*
   6701  * wm_stop:		[ifnet interface function]
   6702  *
   6703  *	Stop transmission on the interface.
   6704  */
   6705 static void
   6706 wm_stop(struct ifnet *ifp, int disable)
   6707 {
   6708 	struct wm_softc *sc = ifp->if_softc;
   6709 
   6710 	ASSERT_SLEEPABLE();
   6711 
   6712 	WM_CORE_LOCK(sc);
   6713 	wm_stop_locked(ifp, disable ? true : false, true);
   6714 	WM_CORE_UNLOCK(sc);
   6715 
	/*
	 * After wm_set_stopping_flags(), it is guaranteed that
	 * wm_handle_queue_work() does not call workqueue_enqueue().
	 * However, workqueue_wait() cannot be called in wm_stop_locked()
	 * because it can sleep, so call workqueue_wait() here.
	 */
   6723 	for (int i = 0; i < sc->sc_nqueues; i++)
   6724 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6725 }
   6726 
   6727 static void
   6728 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
   6729 {
   6730 	struct wm_softc *sc = ifp->if_softc;
   6731 	struct wm_txsoft *txs;
   6732 	int i, qidx;
   6733 
   6734 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   6735 		device_xname(sc->sc_dev), __func__));
   6736 	KASSERT(WM_CORE_LOCKED(sc));
   6737 
   6738 	wm_set_stopping_flags(sc);
   6739 
   6740 	if (sc->sc_flags & WM_F_HAS_MII) {
   6741 		/* Down the MII. */
   6742 		mii_down(&sc->sc_mii);
   6743 	} else {
   6744 #if 0
   6745 		/* Should we clear PHY's status properly? */
   6746 		wm_reset(sc);
   6747 #endif
   6748 	}
   6749 
   6750 	/* Stop the transmit and receive processes. */
   6751 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6752 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6753 	sc->sc_rctl &= ~RCTL_EN;
   6754 
   6755 	/*
   6756 	 * Clear the interrupt mask to ensure the device cannot assert its
   6757 	 * interrupt line.
   6758 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6759 	 * service any currently pending or shared interrupt.
   6760 	 */
   6761 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6762 	sc->sc_icr = 0;
   6763 	if (wm_is_using_msix(sc)) {
   6764 		if (sc->sc_type != WM_T_82574) {
   6765 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6766 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6767 		} else
   6768 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6769 	}
   6770 
   6771 	/*
   6772 	 * Stop callouts after interrupts are disabled; if we have
   6773 	 * to wait for them, we will be releasing the CORE_LOCK
   6774 	 * briefly, which will unblock interrupts on the current CPU.
   6775 	 */
   6776 
   6777 	/* Stop the one second clock. */
   6778 	if (wait)
   6779 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
   6780 	else
   6781 		callout_stop(&sc->sc_tick_ch);
   6782 
   6783 	/* Stop the 82547 Tx FIFO stall check timer. */
   6784 	if (sc->sc_type == WM_T_82547) {
   6785 		if (wait)
   6786 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
   6787 		else
   6788 			callout_stop(&sc->sc_txfifo_ch);
   6789 	}
   6790 
   6791 	/* Release any queued transmit buffers. */
   6792 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6793 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6794 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6795 		struct mbuf *m;
   6796 
   6797 		mutex_enter(txq->txq_lock);
   6798 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6799 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6800 			txs = &txq->txq_soft[i];
   6801 			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
   6803 				m_freem(txs->txs_mbuf);
   6804 				txs->txs_mbuf = NULL;
   6805 			}
   6806 		}
   6807 		/* Drain txq_interq */
   6808 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6809 			m_freem(m);
   6810 		mutex_exit(txq->txq_lock);
   6811 	}
   6812 
   6813 	/* Mark the interface as down and cancel the watchdog timer. */
   6814 	ifp->if_flags &= ~IFF_RUNNING;
   6815 
   6816 	if (disable) {
   6817 		for (i = 0; i < sc->sc_nqueues; i++) {
   6818 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6819 			mutex_enter(rxq->rxq_lock);
   6820 			wm_rxdrain(rxq);
   6821 			mutex_exit(rxq->rxq_lock);
   6822 		}
   6823 	}
   6824 
   6825 #if 0 /* notyet */
   6826 	if (sc->sc_type >= WM_T_82544)
   6827 		CSR_WRITE(sc, WMREG_WUC, 0);
   6828 #endif
   6829 }
   6830 
   6831 static void
   6832 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6833 {
   6834 	struct mbuf *m;
   6835 	int i;
   6836 
   6837 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6838 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6839 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6840 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6841 		    m->m_data, m->m_len, m->m_flags);
   6842 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6843 	    i, i == 1 ? "" : "s");
   6844 }
   6845 
   6846 /*
   6847  * wm_82547_txfifo_stall:
   6848  *
   6849  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6850  *	reset the FIFO pointers, and restart packet transmission.
   6851  */
   6852 static void
   6853 wm_82547_txfifo_stall(void *arg)
   6854 {
   6855 	struct wm_softc *sc = arg;
   6856 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6857 
   6858 	mutex_enter(txq->txq_lock);
   6859 
   6860 	if (txq->txq_stopping)
   6861 		goto out;
   6862 
   6863 	if (txq->txq_fifo_stall) {
   6864 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6865 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6866 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6867 			/*
   6868 			 * Packets have drained.  Stop transmitter, reset
   6869 			 * FIFO pointers, restart transmitter, and kick
   6870 			 * the packet queue.
   6871 			 */
   6872 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6873 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6874 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6875 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6876 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6877 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6878 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6879 			CSR_WRITE_FLUSH(sc);
   6880 
   6881 			txq->txq_fifo_head = 0;
   6882 			txq->txq_fifo_stall = 0;
   6883 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6884 		} else {
   6885 			/*
   6886 			 * Still waiting for packets to drain; try again in
   6887 			 * another tick.
   6888 			 */
   6889 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6890 		}
   6891 	}
   6892 
   6893 out:
   6894 	mutex_exit(txq->txq_lock);
   6895 }
   6896 
   6897 /*
   6898  * wm_82547_txfifo_bugchk:
   6899  *
   6900  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6901  *	prevent enqueueing a packet that would wrap around the end
   6902  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6904  *	We do this by checking the amount of space before the end
   6905  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6906  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6907  *	the internal FIFO pointers to the beginning, and restart
   6908  *	transmission on the interface.
   6909  */
   6910 #define	WM_FIFO_HDR		0x10
   6911 #define	WM_82547_PAD_LEN	0x3e0
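/*
 * Each packet consumes its own length plus a WM_FIFO_HDR-byte header in
 * the Tx FIFO, rounded up to a multiple of WM_FIFO_HDR; e.g. a 1514-byte
 * frame is accounted as roundup(1514 + 0x10, 0x10) = 1536 bytes.
 */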
   6912 static int
   6913 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6914 {
   6915 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6916 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6917 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6918 
   6919 	/* Just return if already stalled. */
   6920 	if (txq->txq_fifo_stall)
   6921 		return 1;
   6922 
   6923 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6924 		/* Stall only occurs in half-duplex mode. */
   6925 		goto send_packet;
   6926 	}
   6927 
   6928 	if (len >= WM_82547_PAD_LEN + space) {
   6929 		txq->txq_fifo_stall = 1;
   6930 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6931 		return 1;
   6932 	}
   6933 
   6934  send_packet:
   6935 	txq->txq_fifo_head += len;
   6936 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6937 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6938 
   6939 	return 0;
   6940 }
   6941 
   6942 static int
   6943 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6944 {
   6945 	int error;
   6946 
   6947 	/*
   6948 	 * Allocate the control data structures, and create and load the
   6949 	 * DMA map for it.
   6950 	 *
   6951 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6952 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6953 	 * both sets within the same 4G segment.
   6954 	 */
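	/*
	 * The 4G constraint is enforced by passing 0x100000000 as the
	 * boundary argument to bus_dmamem_alloc() below.
	 */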
   6955 	if (sc->sc_type < WM_T_82544)
   6956 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6957 	else
   6958 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6959 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6960 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6961 	else
   6962 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6963 
   6964 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6965 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6966 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6967 		aprint_error_dev(sc->sc_dev,
   6968 		    "unable to allocate TX control data, error = %d\n",
   6969 		    error);
   6970 		goto fail_0;
   6971 	}
   6972 
   6973 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6974 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6975 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6976 		aprint_error_dev(sc->sc_dev,
   6977 		    "unable to map TX control data, error = %d\n", error);
   6978 		goto fail_1;
   6979 	}
   6980 
   6981 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6982 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6983 		aprint_error_dev(sc->sc_dev,
   6984 		    "unable to create TX control data DMA map, error = %d\n",
   6985 		    error);
   6986 		goto fail_2;
   6987 	}
   6988 
   6989 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6990 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6991 		aprint_error_dev(sc->sc_dev,
   6992 		    "unable to load TX control data DMA map, error = %d\n",
   6993 		    error);
   6994 		goto fail_3;
   6995 	}
   6996 
   6997 	return 0;
   6998 
   6999  fail_3:
   7000 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7001  fail_2:
   7002 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7003 	    WM_TXDESCS_SIZE(txq));
   7004  fail_1:
   7005 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7006  fail_0:
   7007 	return error;
   7008 }
   7009 
   7010 static void
   7011 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   7012 {
   7013 
   7014 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   7015 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   7016 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   7017 	    WM_TXDESCS_SIZE(txq));
   7018 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   7019 }
   7020 
   7021 static int
   7022 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7023 {
   7024 	int error;
   7025 	size_t rxq_descs_size;
   7026 
   7027 	/*
   7028 	 * Allocate the control data structures, and create and load the
   7029 	 * DMA map for it.
   7030 	 *
   7031 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   7032 	 * memory.  So must Rx descriptors.  We simplify by allocating
   7033 	 * both sets within the same 4G segment.
   7034 	 */
   7035 	rxq->rxq_ndesc = WM_NRXDESC;
   7036 	if (sc->sc_type == WM_T_82574)
   7037 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   7038 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7039 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   7040 	else
   7041 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   7042 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   7043 
   7044 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   7045 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   7046 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   7047 		aprint_error_dev(sc->sc_dev,
   7048 		    "unable to allocate RX control data, error = %d\n",
   7049 		    error);
   7050 		goto fail_0;
   7051 	}
   7052 
   7053 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   7054 		    rxq->rxq_desc_rseg, rxq_descs_size,
   7055 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   7056 		aprint_error_dev(sc->sc_dev,
   7057 		    "unable to map RX control data, error = %d\n", error);
   7058 		goto fail_1;
   7059 	}
   7060 
   7061 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   7062 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   7063 		aprint_error_dev(sc->sc_dev,
   7064 		    "unable to create RX control data DMA map, error = %d\n",
   7065 		    error);
   7066 		goto fail_2;
   7067 	}
   7068 
   7069 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   7070 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   7071 		aprint_error_dev(sc->sc_dev,
   7072 		    "unable to load RX control data DMA map, error = %d\n",
   7073 		    error);
   7074 		goto fail_3;
   7075 	}
   7076 
   7077 	return 0;
   7078 
   7079  fail_3:
   7080 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7081  fail_2:
   7082 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7083 	    rxq_descs_size);
   7084  fail_1:
   7085 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7086  fail_0:
   7087 	return error;
   7088 }
   7089 
   7090 static void
   7091 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7092 {
   7093 
   7094 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7095 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   7096 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   7097 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   7098 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   7099 }
   7100 
   7102 static int
   7103 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7104 {
   7105 	int i, error;
   7106 
   7107 	/* Create the transmit buffer DMA maps. */
   7108 	WM_TXQUEUELEN(txq) =
   7109 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   7110 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   7111 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7112 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   7113 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   7114 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   7115 			aprint_error_dev(sc->sc_dev,
   7116 			    "unable to create Tx DMA map %d, error = %d\n",
   7117 			    i, error);
   7118 			goto fail;
   7119 		}
   7120 	}
   7121 
   7122 	return 0;
   7123 
   7124  fail:
   7125 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7126 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7127 			bus_dmamap_destroy(sc->sc_dmat,
   7128 			    txq->txq_soft[i].txs_dmamap);
   7129 	}
   7130 	return error;
   7131 }
   7132 
   7133 static void
   7134 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   7135 {
   7136 	int i;
   7137 
   7138 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   7139 		if (txq->txq_soft[i].txs_dmamap != NULL)
   7140 			bus_dmamap_destroy(sc->sc_dmat,
   7141 			    txq->txq_soft[i].txs_dmamap);
   7142 	}
   7143 }
   7144 
   7145 static int
   7146 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7147 {
   7148 	int i, error;
   7149 
   7150 	/* Create the receive buffer DMA maps. */
   7151 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7152 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   7153 			    MCLBYTES, 0, 0,
   7154 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   7155 			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d, error = %d\n",
   7157 			    i, error);
   7158 			goto fail;
   7159 		}
   7160 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   7161 	}
   7162 
   7163 	return 0;
   7164 
   7165  fail:
   7166 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7167 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7168 			bus_dmamap_destroy(sc->sc_dmat,
   7169 			    rxq->rxq_soft[i].rxs_dmamap);
   7170 	}
   7171 	return error;
   7172 }
   7173 
   7174 static void
   7175 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7176 {
   7177 	int i;
   7178 
   7179 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7180 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   7181 			bus_dmamap_destroy(sc->sc_dmat,
   7182 			    rxq->rxq_soft[i].rxs_dmamap);
   7183 	}
   7184 }
   7185 
/*
 * wm_alloc_txrx_queues:
 *	Allocate {tx,rx}descs and {tx,rx} buffers.
 */
   7190 static int
   7191 wm_alloc_txrx_queues(struct wm_softc *sc)
   7192 {
   7193 	int i, error, tx_done, rx_done;
   7194 
   7195 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   7196 	    KM_SLEEP);
   7197 	if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   7199 		error = ENOMEM;
   7200 		goto fail_0;
   7201 	}
   7202 
   7203 	/* For transmission */
   7204 	error = 0;
   7205 	tx_done = 0;
   7206 	for (i = 0; i < sc->sc_nqueues; i++) {
   7207 #ifdef WM_EVENT_COUNTERS
   7208 		int j;
   7209 		const char *xname;
   7210 #endif
   7211 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7212 		txq->txq_sc = sc;
   7213 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7214 
   7215 		error = wm_alloc_tx_descs(sc, txq);
   7216 		if (error)
   7217 			break;
   7218 		error = wm_alloc_tx_buffer(sc, txq);
   7219 		if (error) {
   7220 			wm_free_tx_descs(sc, txq);
   7221 			break;
   7222 		}
   7223 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   7224 		if (txq->txq_interq == NULL) {
   7225 			wm_free_tx_descs(sc, txq);
   7226 			wm_free_tx_buffer(sc, txq);
   7227 			error = ENOMEM;
   7228 			break;
   7229 		}
   7230 
   7231 #ifdef WM_EVENT_COUNTERS
   7232 		xname = device_xname(sc->sc_dev);
   7233 
   7234 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   7235 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   7236 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   7237 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   7238 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   7239 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   7240 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   7241 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   7242 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   7243 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   7244 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   7245 
   7246 		for (j = 0; j < WM_NTXSEGS; j++) {
			snprintf(txq->txq_txseg_evcnt_names[j],
			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
			    EVCNT_TYPE_MISC, NULL, xname,
			    txq->txq_txseg_evcnt_names[j]);
   7251 		}
   7252 
   7253 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   7254 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   7255 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   7256 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   7257 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   7258 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
   7259 #endif /* WM_EVENT_COUNTERS */
   7260 
   7261 		tx_done++;
   7262 	}
   7263 	if (error)
   7264 		goto fail_1;
   7265 
   7266 	/* For receive */
   7267 	error = 0;
   7268 	rx_done = 0;
   7269 	for (i = 0; i < sc->sc_nqueues; i++) {
   7270 #ifdef WM_EVENT_COUNTERS
   7271 		const char *xname;
   7272 #endif
   7273 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7274 		rxq->rxq_sc = sc;
   7275 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   7276 
   7277 		error = wm_alloc_rx_descs(sc, rxq);
   7278 		if (error)
   7279 			break;
   7280 
   7281 		error = wm_alloc_rx_buffer(sc, rxq);
   7282 		if (error) {
   7283 			wm_free_rx_descs(sc, rxq);
   7284 			break;
   7285 		}
   7286 
   7287 #ifdef WM_EVENT_COUNTERS
   7288 		xname = device_xname(sc->sc_dev);
   7289 
   7290 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   7291 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   7292 
   7293 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   7294 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   7295 #endif /* WM_EVENT_COUNTERS */
   7296 
   7297 		rx_done++;
   7298 	}
   7299 	if (error)
   7300 		goto fail_2;
   7301 
   7302 	return 0;
   7303 
   7304  fail_2:
   7305 	for (i = 0; i < rx_done; i++) {
   7306 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7307 		wm_free_rx_buffer(sc, rxq);
   7308 		wm_free_rx_descs(sc, rxq);
   7309 		if (rxq->rxq_lock)
   7310 			mutex_obj_free(rxq->rxq_lock);
   7311 	}
   7312  fail_1:
   7313 	for (i = 0; i < tx_done; i++) {
   7314 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7315 		pcq_destroy(txq->txq_interq);
   7316 		wm_free_tx_buffer(sc, txq);
   7317 		wm_free_tx_descs(sc, txq);
   7318 		if (txq->txq_lock)
   7319 			mutex_obj_free(txq->txq_lock);
   7320 	}
   7321 
   7322 	kmem_free(sc->sc_queue,
   7323 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7324  fail_0:
   7325 	return error;
   7326 }
   7327 
/*
 * wm_free_txrx_queues:
 *	Free {tx,rx}descs and {tx,rx} buffers.
 */
   7332 static void
   7333 wm_free_txrx_queues(struct wm_softc *sc)
   7334 {
   7335 	int i;
   7336 
   7337 	for (i = 0; i < sc->sc_nqueues; i++) {
   7338 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7339 
   7340 #ifdef WM_EVENT_COUNTERS
   7341 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7342 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7343 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7344 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7345 #endif /* WM_EVENT_COUNTERS */
   7346 
   7347 		wm_free_rx_buffer(sc, rxq);
   7348 		wm_free_rx_descs(sc, rxq);
   7349 		if (rxq->rxq_lock)
   7350 			mutex_obj_free(rxq->rxq_lock);
   7351 	}
   7352 
   7353 	for (i = 0; i < sc->sc_nqueues; i++) {
   7354 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7355 		struct mbuf *m;
   7356 #ifdef WM_EVENT_COUNTERS
   7357 		int j;
   7358 
   7359 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7360 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7361 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7362 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7363 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7364 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7365 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7366 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7367 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7368 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7369 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7370 
   7371 		for (j = 0; j < WM_NTXSEGS; j++)
   7372 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7373 
   7374 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7375 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7376 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7377 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7378 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7379 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
   7380 #endif /* WM_EVENT_COUNTERS */
   7381 
   7382 		/* Drain txq_interq */
   7383 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7384 			m_freem(m);
   7385 		pcq_destroy(txq->txq_interq);
   7386 
   7387 		wm_free_tx_buffer(sc, txq);
   7388 		wm_free_tx_descs(sc, txq);
   7389 		if (txq->txq_lock)
   7390 			mutex_obj_free(txq->txq_lock);
   7391 	}
   7392 
   7393 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7394 }
   7395 
   7396 static void
   7397 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7398 {
   7399 
   7400 	KASSERT(mutex_owned(txq->txq_lock));
   7401 
   7402 	/* Initialize the transmit descriptor ring. */
   7403 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7404 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7405 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7406 	txq->txq_free = WM_NTXDESC(txq);
   7407 	txq->txq_next = 0;
   7408 }
   7409 
   7410 static void
   7411 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7412     struct wm_txqueue *txq)
   7413 {
   7414 
   7415 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7416 		device_xname(sc->sc_dev), __func__));
   7417 	KASSERT(mutex_owned(txq->txq_lock));
   7418 
   7419 	if (sc->sc_type < WM_T_82543) {
   7420 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7421 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7422 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7423 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7424 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7425 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7426 	} else {
   7427 		int qid = wmq->wmq_id;
   7428 
   7429 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7430 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7431 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7432 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7433 
   7434 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7435 			/*
   7436 			 * Don't write TDT before TCTL.EN is set.
   7437 			 * See the document.
   7438 			 */
   7439 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7440 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7441 			    | TXDCTL_WTHRESH(0));
   7442 		else {
   7443 			/* XXX should update with AIM? */
   7444 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7445 			if (sc->sc_type >= WM_T_82540) {
   7446 				/* Should be the same */
   7447 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7448 			}
   7449 
   7450 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7451 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7452 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7453 		}
   7454 	}
   7455 }
   7456 
   7457 static void
   7458 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7459 {
   7460 	int i;
   7461 
   7462 	KASSERT(mutex_owned(txq->txq_lock));
   7463 
   7464 	/* Initialize the transmit job descriptors. */
   7465 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7466 		txq->txq_soft[i].txs_mbuf = NULL;
   7467 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7468 	txq->txq_snext = 0;
   7469 	txq->txq_sdirty = 0;
   7470 }
   7471 
   7472 static void
   7473 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7474     struct wm_txqueue *txq)
   7475 {
   7476 
   7477 	KASSERT(mutex_owned(txq->txq_lock));
   7478 
   7479 	/*
   7480 	 * Set up some register offsets that are different between
   7481 	 * the i82542 and the i82543 and later chips.
   7482 	 */
   7483 	if (sc->sc_type < WM_T_82543)
   7484 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7485 	else
   7486 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7487 
   7488 	wm_init_tx_descs(sc, txq);
   7489 	wm_init_tx_regs(sc, wmq, txq);
   7490 	wm_init_tx_buffer(sc, txq);
   7491 
	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD. */
   7493 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
   7494 
   7495 	txq->txq_sending = false;
   7496 }
   7497 
   7498 static void
   7499 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7500     struct wm_rxqueue *rxq)
   7501 {
   7502 
   7503 	KASSERT(mutex_owned(rxq->rxq_lock));
   7504 
   7505 	/*
   7506 	 * Initialize the receive descriptor and receive job
   7507 	 * descriptor rings.
   7508 	 */
   7509 	if (sc->sc_type < WM_T_82543) {
   7510 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7511 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7512 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7513 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7514 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7515 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7516 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7517 
   7518 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7519 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7520 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7521 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7522 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7523 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7524 	} else {
   7525 		int qid = wmq->wmq_id;
   7526 
   7527 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7528 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7529 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7530 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7531 
   7532 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7533 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for 82575 "
				    "or higher\n", __func__, MCLBYTES);
   7535 
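			/*
			 * SRRCTL.BSIZEPKT is expressed in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
			 * check above; with a 10-bit shift, MCLBYTES == 2048
			 * programs a buffer size of 2 (KB).
			 */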
			/*
			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
			 * supported.
			 */
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7539 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7540 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7541 			    | RXDCTL_WTHRESH(1));
   7542 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7543 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7544 		} else {
   7545 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7546 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7547 			/* XXX should update with AIM? */
   7548 			CSR_WRITE(sc, WMREG_RDTR,
   7549 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7550 			/* MUST be same */
   7551 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7552 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7553 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7554 		}
   7555 	}
   7556 }
   7557 
   7558 static int
   7559 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7560 {
   7561 	struct wm_rxsoft *rxs;
   7562 	int error, i;
   7563 
   7564 	KASSERT(mutex_owned(rxq->rxq_lock));
   7565 
   7566 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7567 		rxs = &rxq->rxq_soft[i];
   7568 		if (rxs->rxs_mbuf == NULL) {
   7569 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7570 				log(LOG_ERR, "%s: unable to allocate or map "
   7571 				    "rx buffer %d, error = %d\n",
   7572 				    device_xname(sc->sc_dev), i, error);
   7573 				/*
   7574 				 * XXX Should attempt to run with fewer receive
   7575 				 * XXX buffers instead of just failing.
   7576 				 */
   7577 				wm_rxdrain(rxq);
   7578 				return ENOMEM;
   7579 			}
   7580 		} else {
   7581 			/*
   7582 			 * For 82575 and 82576, the RX descriptors must be
   7583 			 * initialized after the setting of RCTL.EN in
   7584 			 * wm_set_filter()
   7585 			 */
   7586 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7587 				wm_init_rxdesc(rxq, i);
   7588 		}
   7589 	}
   7590 	rxq->rxq_ptr = 0;
   7591 	rxq->rxq_discard = 0;
   7592 	WM_RXCHAIN_RESET(rxq);
   7593 
   7594 	return 0;
   7595 }
   7596 
   7597 static int
   7598 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7599     struct wm_rxqueue *rxq)
   7600 {
   7601 
   7602 	KASSERT(mutex_owned(rxq->rxq_lock));
   7603 
   7604 	/*
   7605 	 * Set up some register offsets that are different between
   7606 	 * the i82542 and the i82543 and later chips.
   7607 	 */
   7608 	if (sc->sc_type < WM_T_82543)
   7609 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7610 	else
   7611 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7612 
   7613 	wm_init_rx_regs(sc, wmq, rxq);
   7614 	return wm_init_rx_buffer(sc, rxq);
   7615 }
   7616 
/*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx}descs and {tx,rx} buffers.
 */
   7621 static int
   7622 wm_init_txrx_queues(struct wm_softc *sc)
   7623 {
   7624 	int i, error = 0;
   7625 
   7626 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   7627 		device_xname(sc->sc_dev), __func__));
   7628 
   7629 	for (i = 0; i < sc->sc_nqueues; i++) {
   7630 		struct wm_queue *wmq = &sc->sc_queue[i];
   7631 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7632 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7633 
		/*
		 * TODO
		 * Currently, we use a constant value instead of AIM.
		 * Furthermore, the interrupt interval of a multiqueue
		 * configuration, which uses polling mode, is less than the
		 * default value.
		 * More tuning and AIM are required.
		 */
   7641 		if (wm_is_using_multiqueue(sc))
   7642 			wmq->wmq_itr = 50;
   7643 		else
   7644 			wmq->wmq_itr = sc->sc_itr_init;
   7645 		wmq->wmq_set_itr = true;
   7646 
   7647 		mutex_enter(txq->txq_lock);
   7648 		wm_init_tx_queue(sc, wmq, txq);
   7649 		mutex_exit(txq->txq_lock);
   7650 
   7651 		mutex_enter(rxq->rxq_lock);
   7652 		error = wm_init_rx_queue(sc, wmq, rxq);
   7653 		mutex_exit(rxq->rxq_lock);
   7654 		if (error)
   7655 			break;
   7656 	}
   7657 
   7658 	return error;
   7659 }
   7660 
   7661 /*
   7662  * wm_tx_offload:
   7663  *
   7664  *	Set up TCP/IP checksumming parameters for the
   7665  *	specified packet.
   7666  */
   7667 static void
   7668 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7669     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7670 {
   7671 	struct mbuf *m0 = txs->txs_mbuf;
   7672 	struct livengood_tcpip_ctxdesc *t;
   7673 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7674 	uint32_t ipcse;
   7675 	struct ether_header *eh;
   7676 	int offset, iphl;
   7677 	uint8_t fields;
   7678 
   7679 	/*
   7680 	 * XXX It would be nice if the mbuf pkthdr had offset
   7681 	 * fields for the protocol headers.
   7682 	 */
   7683 
   7684 	eh = mtod(m0, struct ether_header *);
	switch (ntohs(eh->ether_type)) {
   7686 	case ETHERTYPE_IP:
   7687 	case ETHERTYPE_IPV6:
   7688 		offset = ETHER_HDR_LEN;
   7689 		break;
   7690 
   7691 	case ETHERTYPE_VLAN:
   7692 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7693 		break;
   7694 
   7695 	default:
   7696 		/* Don't support this protocol or encapsulation. */
   7697 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   7698 		txq->txq_last_hw_ipcs = 0;
   7699 		txq->txq_last_hw_tucs = 0;
   7700 		*fieldsp = 0;
   7701 		*cmdp = 0;
   7702 		return;
   7703 	}
   7704 
   7705 	if ((m0->m_pkthdr.csum_flags &
   7706 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7707 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7708 	} else
   7709 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7710 
   7711 	ipcse = offset + iphl - 1;
   7712 
   7713 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7714 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7715 	seg = 0;
   7716 	fields = 0;
   7717 
   7718 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7719 		int hlen = offset + iphl;
   7720 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7721 
   7722 		if (__predict_false(m0->m_len <
   7723 				    (hlen + sizeof(struct tcphdr)))) {
   7724 			/*
   7725 			 * TCP/IP headers are not in the first mbuf; we need
   7726 			 * to do this the slow and painful way. Let's just
   7727 			 * hope this doesn't happen very often.
   7728 			 */
   7729 			struct tcphdr th;
   7730 
   7731 			WM_Q_EVCNT_INCR(txq, tsopain);
   7732 
   7733 			m_copydata(m0, hlen, sizeof(th), &th);
   7734 			if (v4) {
   7735 				struct ip ip;
   7736 
   7737 				m_copydata(m0, offset, sizeof(ip), &ip);
   7738 				ip.ip_len = 0;
   7739 				m_copyback(m0,
   7740 				    offset + offsetof(struct ip, ip_len),
   7741 				    sizeof(ip.ip_len), &ip.ip_len);
   7742 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7743 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7744 			} else {
   7745 				struct ip6_hdr ip6;
   7746 
   7747 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7748 				ip6.ip6_plen = 0;
   7749 				m_copyback(m0,
   7750 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7751 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7752 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7753 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7754 			}
   7755 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7756 			    sizeof(th.th_sum), &th.th_sum);
   7757 
   7758 			hlen += th.th_off << 2;
   7759 		} else {
   7760 			/*
   7761 			 * TCP/IP headers are in the first mbuf; we can do
   7762 			 * this the easy way.
   7763 			 */
   7764 			struct tcphdr *th;
   7765 
   7766 			if (v4) {
   7767 				struct ip *ip =
   7768 				    (void *)(mtod(m0, char *) + offset);
   7769 				th = (void *)(mtod(m0, char *) + hlen);
   7770 
   7771 				ip->ip_len = 0;
   7772 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7773 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7774 			} else {
   7775 				struct ip6_hdr *ip6 =
   7776 				    (void *)(mtod(m0, char *) + offset);
   7777 				th = (void *)(mtod(m0, char *) + hlen);
   7778 
   7779 				ip6->ip6_plen = 0;
   7780 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7781 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7782 			}
   7783 			hlen += th->th_off << 2;
   7784 		}
   7785 
   7786 		if (v4) {
   7787 			WM_Q_EVCNT_INCR(txq, tso);
   7788 			cmdlen |= WTX_TCPIP_CMD_IP;
   7789 		} else {
   7790 			WM_Q_EVCNT_INCR(txq, tso6);
   7791 			ipcse = 0;
   7792 		}
   7793 		cmd |= WTX_TCPIP_CMD_TSE;
   7794 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7795 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7796 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7797 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7798 	}
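        	/*
        	 * Illustrative values (assumed, not taken from a real packet):
        	 * a TSO TCP/IPv4 frame with standard 20-byte IP and TCP
        	 * headers has hlen = 14 + 20 + 20 = 54, so cmdlen carries the
        	 * TSO payload length m0->m_pkthdr.len - 54, and with an MSS of
        	 * 1460 the seg word is WTX_TCPIP_SEG_HDRLEN(54) |
        	 * WTX_TCPIP_SEG_MSS(1460).
        	 */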
   7799 
   7800 	/*
   7801 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7802 	 * offload feature, if we load the context descriptor, we
   7803 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7804 	 */
   7805 
   7806 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7807 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7808 	    WTX_TCPIP_IPCSE(ipcse);
   7809 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7810 		WM_Q_EVCNT_INCR(txq, ipsum);
   7811 		fields |= WTX_IXSM;
   7812 	}
   7813 
   7814 	offset += iphl;
   7815 
   7816 	if (m0->m_pkthdr.csum_flags &
   7817 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7818 		WM_Q_EVCNT_INCR(txq, tusum);
   7819 		fields |= WTX_TXSM;
   7820 		tucs = WTX_TCPIP_TUCSS(offset) |
   7821 		    WTX_TCPIP_TUCSO(offset +
   7822 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7823 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7824 	} else if ((m0->m_pkthdr.csum_flags &
   7825 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7826 		WM_Q_EVCNT_INCR(txq, tusum6);
   7827 		fields |= WTX_TXSM;
   7828 		tucs = WTX_TCPIP_TUCSS(offset) |
   7829 		    WTX_TCPIP_TUCSO(offset +
   7830 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7831 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7832 	} else {
   7833 		/* Just initialize it to a valid TCP context. */
   7834 		tucs = WTX_TCPIP_TUCSS(offset) |
   7835 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7836 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7837 	}
   7838 
   7839 	*cmdp = cmd;
   7840 	*fieldsp = fields;
   7841 
   7842 	/*
   7843 	 * We don't have to write a context descriptor for every packet,
   7844 	 * except on the 82574. For the 82574, we must write a context
   7845 	 * descriptor for every packet when we use two descriptor queues.
   7846 	 *
   7847 	 * The 82574L can only remember the *last* context used,
   7848 	 * regardless of which queue it was used for.  We cannot reuse
   7849 	 * contexts on this hardware platform and must generate a new
   7850 	 * context every time.  82574L hardware spec, section 7.2.6,
   7851 	 * second note.
   7852 	 */
   7853 	if (sc->sc_nqueues < 2) {
   7854 		/*
   7855 		 * Setting up a new checksum offload context for every
   7856 		 * frame takes a lot of processing time for the hardware,
   7857 		 * and it also reduces performance a lot for small frames.
   7858 		 * So avoid it if the driver can reuse the previously
   7859 		 * configured checksum offload context.
   7860 		 * For TSO, in theory we could reuse the same TSO context
   7861 		 * only if the frame is the same type (IP/TCP) and has the
   7862 		 * same MSS. However, checking whether a frame has the same
   7863 		 * IP/TCP structure is hard, so just ignore that and always
   7864 		 * establish a new TSO context.
   7865 		 */
   7866 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
   7867 		    == 0) {
   7868 			if (txq->txq_last_hw_cmd == cmd &&
   7869 			    txq->txq_last_hw_fields == fields &&
   7870 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
   7871 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
   7872 				WM_Q_EVCNT_INCR(txq, skipcontext);
   7873 				return;
   7874 			}
   7875 		}
   7876 
   7877 		txq->txq_last_hw_cmd = cmd;
   7878 		txq->txq_last_hw_fields = fields;
   7879 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
   7880 		txq->txq_last_hw_tucs = (tucs & 0xffff);
   7881 	}
   7882 
   7883 	/* Fill in the context descriptor. */
   7884 	t = (struct livengood_tcpip_ctxdesc *)
   7885 	    &txq->txq_descs[txq->txq_next];
   7886 	t->tcpip_ipcs = htole32(ipcs);
   7887 	t->tcpip_tucs = htole32(tucs);
   7888 	t->tcpip_cmdlen = htole32(cmdlen);
   7889 	t->tcpip_seg = htole32(seg);
   7890 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7891 
   7892 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7893 	txs->txs_ndesc++;
   7894 }
   7895 
   7896 static inline int
   7897 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7898 {
   7899 	struct wm_softc *sc = ifp->if_softc;
   7900 	u_int cpuid = cpu_index(curcpu());
   7901 
   7902 	/*
   7903 	 * Currently, a simple distribution strategy.
   7904 	 * TODO:
   7905 	 * Distribute by flowid (RSS hash value).
   7906 	 */
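        	/*
        	 * Worked example (assumed values): with ncpu = 8,
        	 * sc_nqueues = 4 and sc_affinity_offset = 2, a packet sent
        	 * from CPU 5 maps to queue ((5 + 8 - 2) % 8) % 4 = 3.
        	 */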
   7907 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7908 }
   7909 
   7910 static inline bool
   7911 wm_linkdown_discard(struct wm_txqueue *txq)
   7912 {
   7913 
   7914 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
   7915 		return true;
   7916 
   7917 	return false;
   7918 }
   7919 
   7920 /*
   7921  * wm_start:		[ifnet interface function]
   7922  *
   7923  *	Start packet transmission on the interface.
   7924  */
   7925 static void
   7926 wm_start(struct ifnet *ifp)
   7927 {
   7928 	struct wm_softc *sc = ifp->if_softc;
   7929 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7930 
   7931 #ifdef WM_MPSAFE
   7932 	KASSERT(if_is_mpsafe(ifp));
   7933 #endif
   7934 	/*
   7935 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7936 	 */
   7937 
   7938 	mutex_enter(txq->txq_lock);
   7939 	if (!txq->txq_stopping)
   7940 		wm_start_locked(ifp);
   7941 	mutex_exit(txq->txq_lock);
   7942 }
   7943 
   7944 static void
   7945 wm_start_locked(struct ifnet *ifp)
   7946 {
   7947 	struct wm_softc *sc = ifp->if_softc;
   7948 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7949 
   7950 	wm_send_common_locked(ifp, txq, false);
   7951 }
   7952 
   7953 static int
   7954 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7955 {
   7956 	int qid;
   7957 	struct wm_softc *sc = ifp->if_softc;
   7958 	struct wm_txqueue *txq;
   7959 
   7960 	qid = wm_select_txqueue(ifp, m);
   7961 	txq = &sc->sc_queue[qid].wmq_txq;
   7962 
   7963 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7964 		m_freem(m);
   7965 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7966 		return ENOBUFS;
   7967 	}
   7968 
   7969 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7970 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7971 	if (m->m_flags & M_MCAST)
   7972 		if_statinc_ref(nsr, if_omcasts);
   7973 	IF_STAT_PUTREF(ifp);
   7974 
   7975 	if (mutex_tryenter(txq->txq_lock)) {
   7976 		if (!txq->txq_stopping)
   7977 			wm_transmit_locked(ifp, txq);
   7978 		mutex_exit(txq->txq_lock);
   7979 	}
   7980 
   7981 	return 0;
   7982 }
   7983 
   7984 static void
   7985 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7986 {
   7987 
   7988 	wm_send_common_locked(ifp, txq, true);
   7989 }
   7990 
   7991 static void
   7992 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7993     bool is_transmit)
   7994 {
   7995 	struct wm_softc *sc = ifp->if_softc;
   7996 	struct mbuf *m0;
   7997 	struct wm_txsoft *txs;
   7998 	bus_dmamap_t dmamap;
   7999 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   8000 	bus_addr_t curaddr;
   8001 	bus_size_t seglen, curlen;
   8002 	uint32_t cksumcmd;
   8003 	uint8_t cksumfields;
   8004 	bool remap = true;
   8005 
   8006 	KASSERT(mutex_owned(txq->txq_lock));
   8007 
   8008 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8009 		return;
   8010 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8011 		return;
   8012 
   8013 	if (__predict_false(wm_linkdown_discard(txq))) {
   8014 		do {
   8015 			if (is_transmit)
   8016 				m0 = pcq_get(txq->txq_interq);
   8017 			else
   8018 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8019 			/*
   8020 			 * Count the packet as successfully sent, as in the
   8021 			 * case where a link-down PHY discards the packet.
   8022 			 */
   8023 			if (m0 != NULL) {
   8024 				if_statinc(ifp, if_opackets);
   8025 				m_freem(m0);
   8026 			}
   8027 		} while (m0 != NULL);
   8028 		return;
   8029 	}
   8030 
   8031 	/* Remember the previous number of free descriptors. */
   8032 	ofree = txq->txq_free;
   8033 
   8034 	/*
   8035 	 * Loop through the send queue, setting up transmit descriptors
   8036 	 * until we drain the queue, or use up all available transmit
   8037 	 * descriptors.
   8038 	 */
   8039 	for (;;) {
   8040 		m0 = NULL;
   8041 
   8042 		/* Get a work queue entry. */
   8043 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8044 			wm_txeof(txq, UINT_MAX);
   8045 			if (txq->txq_sfree == 0) {
   8046 				DPRINTF(sc, WM_DEBUG_TX,
   8047 				    ("%s: TX: no free job descriptors\n",
   8048 					device_xname(sc->sc_dev)));
   8049 				WM_Q_EVCNT_INCR(txq, txsstall);
   8050 				break;
   8051 			}
   8052 		}
   8053 
   8054 		/* Grab a packet off the queue. */
   8055 		if (is_transmit)
   8056 			m0 = pcq_get(txq->txq_interq);
   8057 		else
   8058 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8059 		if (m0 == NULL)
   8060 			break;
   8061 
   8062 		DPRINTF(sc, WM_DEBUG_TX,
   8063 		    ("%s: TX: have packet to transmit: %p\n",
   8064 			device_xname(sc->sc_dev), m0));
   8065 
   8066 		txs = &txq->txq_soft[txq->txq_snext];
   8067 		dmamap = txs->txs_dmamap;
   8068 
   8069 		use_tso = (m0->m_pkthdr.csum_flags &
   8070 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   8071 
   8072 		/*
   8073 		 * So says the Linux driver:
   8074 		 * The controller does a simple calculation to make sure
   8075 		 * there is enough room in the FIFO before initiating the
   8076 		 * DMA for each buffer. The calc is:
   8077 		 *	4 = ceil(buffer len / MSS)
   8078 		 * To make sure we don't overrun the FIFO, adjust the max
   8079 		 * buffer len if the MSS drops.
   8080 		 */
   8081 		dmamap->dm_maxsegsz =
   8082 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   8083 		    ? m0->m_pkthdr.segsz << 2
   8084 		    : WTX_MAX_LEN;
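        		/*
        		 * Illustrative (assumed MSS, and assuming WTX_MAX_LEN is
        		 * larger): an MSS of 1000 bytes caps dm_maxsegsz at 4000,
        		 * so ceil(buffer len / MSS) never exceeds 4 for any
        		 * single buffer.
        		 */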
   8085 
   8086 		/*
   8087 		 * Load the DMA map.  If this fails, the packet either
   8088 		 * didn't fit in the allotted number of segments, or we
   8089 		 * were short on resources.  For the too-many-segments
   8090 		 * case, we simply report an error and drop the packet,
   8091 		 * since we can't sanely copy a jumbo packet to a single
   8092 		 * buffer.
   8093 		 */
   8094 retry:
   8095 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8096 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8097 		if (__predict_false(error)) {
   8098 			if (error == EFBIG) {
   8099 				if (remap == true) {
   8100 					struct mbuf *m;
   8101 
   8102 					remap = false;
   8103 					m = m_defrag(m0, M_NOWAIT);
   8104 					if (m != NULL) {
   8105 						WM_Q_EVCNT_INCR(txq, defrag);
   8106 						m0 = m;
   8107 						goto retry;
   8108 					}
   8109 				}
   8110 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8111 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8112 				    "DMA segments, dropping...\n",
   8113 				    device_xname(sc->sc_dev));
   8114 				wm_dump_mbuf_chain(sc, m0);
   8115 				m_freem(m0);
   8116 				continue;
   8117 			}
   8118 			/* Short on resources, just stop for now. */
   8119 			DPRINTF(sc, WM_DEBUG_TX,
   8120 			    ("%s: TX: dmamap load failed: %d\n",
   8121 				device_xname(sc->sc_dev), error));
   8122 			break;
   8123 		}
   8124 
   8125 		segs_needed = dmamap->dm_nsegs;
   8126 		if (use_tso) {
   8127 			/* For sentinel descriptor; see below. */
   8128 			segs_needed++;
   8129 		}
   8130 
   8131 		/*
   8132 		 * Ensure we have enough descriptors free to describe
   8133 		 * the packet. Note, we always reserve one descriptor
   8134 		 * at the end of the ring due to the semantics of the
   8135 		 * TDT register, plus one more in the event we need
   8136 		 * to load offload context.
   8137 		 */
   8138 		if (segs_needed > txq->txq_free - 2) {
   8139 			/*
   8140 			 * Not enough free descriptors to transmit this
   8141 			 * packet.  We haven't committed anything yet,
   8142 			 * so just unload the DMA map, put the packet
   8143 			 * back on the queue, and punt. Notify the upper
   8144 			 * layer that there are no more slots left.
   8145 			 */
   8146 			DPRINTF(sc, WM_DEBUG_TX,
   8147 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8148 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8149 				segs_needed, txq->txq_free - 1));
   8150 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8151 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8152 			WM_Q_EVCNT_INCR(txq, txdstall);
   8153 			break;
   8154 		}
   8155 
   8156 		/*
   8157 		 * Check for 82547 Tx FIFO bug. We need to do this
   8158 		 * once we know we can transmit the packet, since we
   8159 		 * do some internal FIFO space accounting here.
   8160 		 */
   8161 		if (sc->sc_type == WM_T_82547 &&
   8162 		    wm_82547_txfifo_bugchk(sc, m0)) {
   8163 			DPRINTF(sc, WM_DEBUG_TX,
   8164 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   8165 				device_xname(sc->sc_dev)));
   8166 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8167 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8168 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   8169 			break;
   8170 		}
   8171 
   8172 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8173 
   8174 		DPRINTF(sc, WM_DEBUG_TX,
   8175 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8176 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8177 
   8178 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8179 
   8180 		/*
   8181 		 * Store a pointer to the packet so that we can free it
   8182 		 * later.
   8183 		 *
   8184 		 * Initially, we take the number of descriptors the packet
   8185 		 * uses to be the number of DMA segments.  This may be
   8186 		 * incremented by 1 if we do checksum offload (a descriptor
   8187 		 * is used to set the checksum context).
   8188 		 */
   8189 		txs->txs_mbuf = m0;
   8190 		txs->txs_firstdesc = txq->txq_next;
   8191 		txs->txs_ndesc = segs_needed;
   8192 
   8193 		/* Set up offload parameters for this packet. */
   8194 		if (m0->m_pkthdr.csum_flags &
   8195 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8196 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8197 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8198 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
   8199 		} else {
   8200 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
   8201 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
   8202 			cksumcmd = 0;
   8203 			cksumfields = 0;
   8204 		}
   8205 
   8206 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   8207 
   8208 		/* Sync the DMA map. */
   8209 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8210 		    BUS_DMASYNC_PREWRITE);
   8211 
   8212 		/* Initialize the transmit descriptor. */
   8213 		for (nexttx = txq->txq_next, seg = 0;
   8214 		     seg < dmamap->dm_nsegs; seg++) {
   8215 			for (seglen = dmamap->dm_segs[seg].ds_len,
   8216 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   8217 			     seglen != 0;
   8218 			     curaddr += curlen, seglen -= curlen,
   8219 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   8220 				curlen = seglen;
   8221 
   8222 				/*
   8223 				 * So says the Linux driver:
   8224 				 * Work around for premature descriptor
   8225 				 * write-backs in TSO mode.  Append a
   8226 				 * 4-byte sentinel descriptor.
   8227 				 */
   8228 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   8229 				    curlen > 8)
   8230 					curlen -= 4;
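        				/*
        				 * The 4 bytes carved off here leave a
        				 * nonzero seglen, so the loop emits one
        				 * extra (sentinel) descriptor; this is
        				 * the descriptor counted by the
        				 * segs_needed++ above.
        				 */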
   8231 
   8232 				wm_set_dma_addr(
   8233 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   8234 				txq->txq_descs[nexttx].wtx_cmdlen
   8235 				    = htole32(cksumcmd | curlen);
   8236 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   8237 				    = 0;
   8238 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   8239 				    = cksumfields;
   8240 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8241 				lasttx = nexttx;
   8242 
   8243 				DPRINTF(sc, WM_DEBUG_TX,
   8244 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   8245 					"len %#04zx\n",
   8246 					device_xname(sc->sc_dev), nexttx,
   8247 					(uint64_t)curaddr, curlen));
   8248 			}
   8249 		}
   8250 
   8251 		KASSERT(lasttx != -1);
   8252 
   8253 		/*
   8254 		 * Set up the command byte on the last descriptor of
   8255 		 * the packet. If we're in the interrupt delay window,
   8256 		 * delay the interrupt.
   8257 		 */
   8258 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8259 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8260 
   8261 		/*
   8262 		 * If VLANs are enabled and the packet has a VLAN tag, set
   8263 		 * up the descriptor to encapsulate the packet for us.
   8264 		 *
   8265 		 * This is only valid on the last descriptor of the packet.
   8266 		 */
   8267 		if (vlan_has_tag(m0)) {
   8268 			txq->txq_descs[lasttx].wtx_cmdlen |=
   8269 			    htole32(WTX_CMD_VLE);
   8270 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   8271 			    = htole16(vlan_get_tag(m0));
   8272 		}
   8273 
   8274 		txs->txs_lastdesc = lasttx;
   8275 
   8276 		DPRINTF(sc, WM_DEBUG_TX,
   8277 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8278 			device_xname(sc->sc_dev),
   8279 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8280 
   8281 		/* Sync the descriptors we're using. */
   8282 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8283 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8284 
   8285 		/* Give the packet to the chip. */
   8286 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8287 
   8288 		DPRINTF(sc, WM_DEBUG_TX,
   8289 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8290 
   8291 		DPRINTF(sc, WM_DEBUG_TX,
   8292 		    ("%s: TX: finished transmitting packet, job %d\n",
   8293 			device_xname(sc->sc_dev), txq->txq_snext));
   8294 
   8295 		/* Advance the tx pointer. */
   8296 		txq->txq_free -= txs->txs_ndesc;
   8297 		txq->txq_next = nexttx;
   8298 
   8299 		txq->txq_sfree--;
   8300 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8301 
   8302 		/* Pass the packet to any BPF listeners. */
   8303 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8304 	}
   8305 
   8306 	if (m0 != NULL) {
   8307 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8308 		WM_Q_EVCNT_INCR(txq, descdrop);
   8309 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8310 			__func__));
   8311 		m_freem(m0);
   8312 	}
   8313 
   8314 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8315 		/* No more slots; notify upper layer. */
   8316 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8317 	}
   8318 
   8319 	if (txq->txq_free != ofree) {
   8320 		/* Set a watchdog timer in case the chip flakes out. */
   8321 		txq->txq_lastsent = time_uptime;
   8322 		txq->txq_sending = true;
   8323 	}
   8324 }
   8325 
   8326 /*
   8327  * wm_nq_tx_offload:
   8328  *
   8329  *	Set up TCP/IP checksumming parameters for the
   8330  *	specified packet, for NEWQUEUE devices
   8331  */
   8332 static void
   8333 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   8334     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   8335 {
   8336 	struct mbuf *m0 = txs->txs_mbuf;
   8337 	uint32_t vl_len, mssidx, cmdc;
   8338 	struct ether_header *eh;
   8339 	int offset, iphl;
   8340 
   8341 	/*
   8342 	 * XXX It would be nice if the mbuf pkthdr had offset
   8343 	 * fields for the protocol headers.
   8344 	 */
   8345 	*cmdlenp = 0;
   8346 	*fieldsp = 0;
   8347 
   8348 	eh = mtod(m0, struct ether_header *);
   8349 	switch (htons(eh->ether_type)) {
   8350 	case ETHERTYPE_IP:
   8351 	case ETHERTYPE_IPV6:
   8352 		offset = ETHER_HDR_LEN;
   8353 		break;
   8354 
   8355 	case ETHERTYPE_VLAN:
   8356 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   8357 		break;
   8358 
   8359 	default:
   8360 		/* Don't support this protocol or encapsulation. */
   8361 		*do_csum = false;
   8362 		return;
   8363 	}
   8364 	*do_csum = true;
   8365 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8366 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8367 
   8368 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8369 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8370 
   8371 	if ((m0->m_pkthdr.csum_flags &
   8372 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8373 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8374 	} else {
   8375 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8376 	}
   8377 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8378 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8379 
   8380 	if (vlan_has_tag(m0)) {
   8381 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8382 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8383 		*cmdlenp |= NQTX_CMD_VLE;
   8384 	}
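        	/*
        	 * Worked example (illustrative values): an untagged IPv4
        	 * frame with a 20-byte IP header packs vl_len as
        	 * (14 << NQTXC_VLLEN_MACLEN_SHIFT) |
        	 * (20 << NQTXC_VLLEN_IPLEN_SHIFT), with the VLAN field zero.
        	 */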
   8385 
   8386 	mssidx = 0;
   8387 
   8388 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8389 		int hlen = offset + iphl;
   8390 		int tcp_hlen;
   8391 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8392 
   8393 		if (__predict_false(m0->m_len <
   8394 				    (hlen + sizeof(struct tcphdr)))) {
   8395 			/*
   8396 			 * TCP/IP headers are not in the first mbuf; we need
   8397 			 * to do this the slow and painful way. Let's just
   8398 			 * hope this doesn't happen very often.
   8399 			 */
   8400 			struct tcphdr th;
   8401 
   8402 			WM_Q_EVCNT_INCR(txq, tsopain);
   8403 
   8404 			m_copydata(m0, hlen, sizeof(th), &th);
   8405 			if (v4) {
   8406 				struct ip ip;
   8407 
   8408 				m_copydata(m0, offset, sizeof(ip), &ip);
   8409 				ip.ip_len = 0;
   8410 				m_copyback(m0,
   8411 				    offset + offsetof(struct ip, ip_len),
   8412 				    sizeof(ip.ip_len), &ip.ip_len);
   8413 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8414 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8415 			} else {
   8416 				struct ip6_hdr ip6;
   8417 
   8418 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8419 				ip6.ip6_plen = 0;
   8420 				m_copyback(m0,
   8421 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8422 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8423 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8424 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8425 			}
   8426 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8427 			    sizeof(th.th_sum), &th.th_sum);
   8428 
   8429 			tcp_hlen = th.th_off << 2;
   8430 		} else {
   8431 			/*
   8432 			 * TCP/IP headers are in the first mbuf; we can do
   8433 			 * this the easy way.
   8434 			 */
   8435 			struct tcphdr *th;
   8436 
   8437 			if (v4) {
   8438 				struct ip *ip =
   8439 				    (void *)(mtod(m0, char *) + offset);
   8440 				th = (void *)(mtod(m0, char *) + hlen);
   8441 
   8442 				ip->ip_len = 0;
   8443 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8444 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8445 			} else {
   8446 				struct ip6_hdr *ip6 =
   8447 				    (void *)(mtod(m0, char *) + offset);
   8448 				th = (void *)(mtod(m0, char *) + hlen);
   8449 
   8450 				ip6->ip6_plen = 0;
   8451 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8452 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8453 			}
   8454 			tcp_hlen = th->th_off << 2;
   8455 		}
   8456 		hlen += tcp_hlen;
   8457 		*cmdlenp |= NQTX_CMD_TSE;
   8458 
   8459 		if (v4) {
   8460 			WM_Q_EVCNT_INCR(txq, tso);
   8461 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8462 		} else {
   8463 			WM_Q_EVCNT_INCR(txq, tso6);
   8464 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8465 		}
   8466 		*fieldsp |= ((m0->m_pkthdr.len - hlen) <<
        		    NQTXD_FIELDS_PAYLEN_SHIFT);
   8467 		KASSERT(((m0->m_pkthdr.len - hlen) &
        		    ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8468 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8469 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8470 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8471 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
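        		/*
        		 * Illustrative (assumed values): an MSS of 1460 and a
        		 * 20-byte TCP header yield mssidx =
        		 * (1460 << NQTXC_MSSIDX_MSS_SHIFT) |
        		 * (20 << NQTXC_MSSIDX_L4LEN_SHIFT).
        		 */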
   8472 	} else {
   8473 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8474 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8475 	}
   8476 
   8477 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8478 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8479 		cmdc |= NQTXC_CMD_IP4;
   8480 	}
   8481 
   8482 	if (m0->m_pkthdr.csum_flags &
   8483 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8484 		WM_Q_EVCNT_INCR(txq, tusum);
   8485 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8486 			cmdc |= NQTXC_CMD_TCP;
   8487 		else
   8488 			cmdc |= NQTXC_CMD_UDP;
   8489 
   8490 		cmdc |= NQTXC_CMD_IP4;
   8491 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8492 	}
   8493 	if (m0->m_pkthdr.csum_flags &
   8494 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8495 		WM_Q_EVCNT_INCR(txq, tusum6);
   8496 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8497 			cmdc |= NQTXC_CMD_TCP;
   8498 		else
   8499 			cmdc |= NQTXC_CMD_UDP;
   8500 
   8501 		cmdc |= NQTXC_CMD_IP6;
   8502 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8503 	}
   8504 
   8505 	/*
   8506 	 * We don't have to write a context descriptor for every packet on
   8507 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
   8508 	 * I354, I210 and I211. It is enough to write one per Tx queue for
   8509 	 * these controllers.
   8510 	 * Writing a context descriptor for every packet adds overhead,
   8511 	 * but it does not cause problems.
   8512 	 */
   8513 	/* Fill in the context descriptor. */
   8514 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8515 	    htole32(vl_len);
   8516 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8517 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8518 	    htole32(cmdc);
   8519 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8520 	    htole32(mssidx);
   8521 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8522 	DPRINTF(sc, WM_DEBUG_TX,
   8523 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8524 		txq->txq_next, 0, vl_len));
   8525 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8526 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8527 	txs->txs_ndesc++;
   8528 }
   8529 
   8530 /*
   8531  * wm_nq_start:		[ifnet interface function]
   8532  *
   8533  *	Start packet transmission on the interface for NEWQUEUE devices
   8534  */
   8535 static void
   8536 wm_nq_start(struct ifnet *ifp)
   8537 {
   8538 	struct wm_softc *sc = ifp->if_softc;
   8539 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8540 
   8541 #ifdef WM_MPSAFE
   8542 	KASSERT(if_is_mpsafe(ifp));
   8543 #endif
   8544 	/*
   8545 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8546 	 */
   8547 
   8548 	mutex_enter(txq->txq_lock);
   8549 	if (!txq->txq_stopping)
   8550 		wm_nq_start_locked(ifp);
   8551 	mutex_exit(txq->txq_lock);
   8552 }
   8553 
   8554 static void
   8555 wm_nq_start_locked(struct ifnet *ifp)
   8556 {
   8557 	struct wm_softc *sc = ifp->if_softc;
   8558 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8559 
   8560 	wm_nq_send_common_locked(ifp, txq, false);
   8561 }
   8562 
   8563 static int
   8564 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8565 {
   8566 	int qid;
   8567 	struct wm_softc *sc = ifp->if_softc;
   8568 	struct wm_txqueue *txq;
   8569 
   8570 	qid = wm_select_txqueue(ifp, m);
   8571 	txq = &sc->sc_queue[qid].wmq_txq;
   8572 
   8573 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8574 		m_freem(m);
   8575 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8576 		return ENOBUFS;
   8577 	}
   8578 
   8579 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8580 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8581 	if (m->m_flags & M_MCAST)
   8582 		if_statinc_ref(nsr, if_omcasts);
   8583 	IF_STAT_PUTREF(ifp);
   8584 
   8585 	/*
   8586 	 * This mutex_tryenter() can fail at run time in two situations:
   8587 	 *     (1) contention with the interrupt handler
   8588 	 *         (wm_txrxintr_msix())
   8589 	 *     (2) contention with the deferred if_start softint
   8590 	 *         (wm_handle_queue())
   8591 	 * In case (1), the last packet enqueued to txq->txq_interq is
   8592 	 * dequeued by wm_deferred_start_locked(), so it does not get
   8593 	 * stuck.  The same holds in case (2): the last enqueued packet
   8594 	 * is also dequeued by wm_deferred_start_locked().
   8595 	 */
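        	/*
        	 * Note the ordering: the packet was pcq_put() before this
        	 * tryenter, so if we lose the race for txq_lock, the current
        	 * lock holder (or the softint it schedules) can still see and
        	 * dequeue it.
        	 */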
   8596 	if (mutex_tryenter(txq->txq_lock)) {
   8597 		if (!txq->txq_stopping)
   8598 			wm_nq_transmit_locked(ifp, txq);
   8599 		mutex_exit(txq->txq_lock);
   8600 	}
   8601 
   8602 	return 0;
   8603 }
   8604 
   8605 static void
   8606 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8607 {
   8608 
   8609 	wm_nq_send_common_locked(ifp, txq, true);
   8610 }
   8611 
   8612 static void
   8613 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8614     bool is_transmit)
   8615 {
   8616 	struct wm_softc *sc = ifp->if_softc;
   8617 	struct mbuf *m0;
   8618 	struct wm_txsoft *txs;
   8619 	bus_dmamap_t dmamap;
   8620 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8621 	bool do_csum, sent;
   8622 	bool remap = true;
   8623 
   8624 	KASSERT(mutex_owned(txq->txq_lock));
   8625 
   8626 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8627 		return;
   8628 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8629 		return;
   8630 
   8631 	if (__predict_false(wm_linkdown_discard(txq))) {
   8632 		do {
   8633 			if (is_transmit)
   8634 				m0 = pcq_get(txq->txq_interq);
   8635 			else
   8636 				IFQ_DEQUEUE(&ifp->if_snd, m0);
   8637 			/*
   8638 			 * Count the packet as successfully sent, as in the
   8639 			 * case where a link-down PHY discards the packet.
   8640 			 */
   8641 			if (m0 != NULL) {
   8642 				if_statinc(ifp, if_opackets);
   8643 				m_freem(m0);
   8644 			}
   8645 		} while (m0 != NULL);
   8646 		return;
   8647 	}
   8648 
   8649 	sent = false;
   8650 
   8651 	/*
   8652 	 * Loop through the send queue, setting up transmit descriptors
   8653 	 * until we drain the queue, or use up all available transmit
   8654 	 * descriptors.
   8655 	 */
   8656 	for (;;) {
   8657 		m0 = NULL;
   8658 
   8659 		/* Get a work queue entry. */
   8660 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8661 			wm_txeof(txq, UINT_MAX);
   8662 			if (txq->txq_sfree == 0) {
   8663 				DPRINTF(sc, WM_DEBUG_TX,
   8664 				    ("%s: TX: no free job descriptors\n",
   8665 					device_xname(sc->sc_dev)));
   8666 				WM_Q_EVCNT_INCR(txq, txsstall);
   8667 				break;
   8668 			}
   8669 		}
   8670 
   8671 		/* Grab a packet off the queue. */
   8672 		if (is_transmit)
   8673 			m0 = pcq_get(txq->txq_interq);
   8674 		else
   8675 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8676 		if (m0 == NULL)
   8677 			break;
   8678 
   8679 		DPRINTF(sc, WM_DEBUG_TX,
   8680 		    ("%s: TX: have packet to transmit: %p\n",
   8681 		    device_xname(sc->sc_dev), m0));
   8682 
   8683 		txs = &txq->txq_soft[txq->txq_snext];
   8684 		dmamap = txs->txs_dmamap;
   8685 
   8686 		/*
   8687 		 * Load the DMA map.  If this fails, the packet either
   8688 		 * didn't fit in the allotted number of segments, or we
   8689 		 * were short on resources.  For the too-many-segments
   8690 		 * case, we simply report an error and drop the packet,
   8691 		 * since we can't sanely copy a jumbo packet to a single
   8692 		 * buffer.
   8693 		 */
   8694 retry:
   8695 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8696 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8697 		if (__predict_false(error)) {
   8698 			if (error == EFBIG) {
   8699 				if (remap == true) {
   8700 					struct mbuf *m;
   8701 
   8702 					remap = false;
   8703 					m = m_defrag(m0, M_NOWAIT);
   8704 					if (m != NULL) {
   8705 						WM_Q_EVCNT_INCR(txq, defrag);
   8706 						m0 = m;
   8707 						goto retry;
   8708 					}
   8709 				}
   8710 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8711 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8712 				    "DMA segments, dropping...\n",
   8713 				    device_xname(sc->sc_dev));
   8714 				wm_dump_mbuf_chain(sc, m0);
   8715 				m_freem(m0);
   8716 				continue;
   8717 			}
   8718 			/* Short on resources, just stop for now. */
   8719 			DPRINTF(sc, WM_DEBUG_TX,
   8720 			    ("%s: TX: dmamap load failed: %d\n",
   8721 				device_xname(sc->sc_dev), error));
   8722 			break;
   8723 		}
   8724 
   8725 		segs_needed = dmamap->dm_nsegs;
   8726 
   8727 		/*
   8728 		 * Ensure we have enough descriptors free to describe
   8729 		 * the packet. Note, we always reserve one descriptor
   8730 		 * at the end of the ring due to the semantics of the
   8731 		 * TDT register, plus one more in the event we need
   8732 		 * to load offload context.
   8733 		 */
   8734 		if (segs_needed > txq->txq_free - 2) {
   8735 			/*
   8736 			 * Not enough free descriptors to transmit this
   8737 			 * packet.  We haven't committed anything yet,
   8738 			 * so just unload the DMA map, put the packet
   8739 			 * back on the queue, and punt. Notify the upper
   8740 			 * layer that there are no more slots left.
   8741 			 */
   8742 			DPRINTF(sc, WM_DEBUG_TX,
   8743 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8744 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8745 				segs_needed, txq->txq_free - 1));
   8746 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8747 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8748 			WM_Q_EVCNT_INCR(txq, txdstall);
   8749 			break;
   8750 		}
   8751 
   8752 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8753 
   8754 		DPRINTF(sc, WM_DEBUG_TX,
   8755 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8756 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8757 
   8758 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8759 
   8760 		/*
   8761 		 * Store a pointer to the packet so that we can free it
   8762 		 * later.
   8763 		 *
   8764 		 * Initially, we take the number of descriptors the packet
   8765 		 * uses to be the number of DMA segments.  This may be
   8766 		 * incremented by 1 if we do checksum offload (a descriptor
   8767 		 * is used to set the checksum context).
   8768 		 */
   8769 		txs->txs_mbuf = m0;
   8770 		txs->txs_firstdesc = txq->txq_next;
   8771 		txs->txs_ndesc = segs_needed;
   8772 
   8773 		/* Set up offload parameters for this packet. */
   8774 		uint32_t cmdlen, fields, dcmdlen;
   8775 		if (m0->m_pkthdr.csum_flags &
   8776 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8777 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8778 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8779 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8780 			    &do_csum);
   8781 		} else {
   8782 			do_csum = false;
   8783 			cmdlen = 0;
   8784 			fields = 0;
   8785 		}
   8786 
   8787 		/* Sync the DMA map. */
   8788 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8789 		    BUS_DMASYNC_PREWRITE);
   8790 
   8791 		/* Initialize the first transmit descriptor. */
   8792 		nexttx = txq->txq_next;
   8793 		if (!do_csum) {
   8794 			/* Set up a legacy descriptor */
   8795 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8796 			    dmamap->dm_segs[0].ds_addr);
   8797 			txq->txq_descs[nexttx].wtx_cmdlen =
   8798 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8799 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8800 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8801 			if (vlan_has_tag(m0)) {
   8802 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8803 				    htole32(WTX_CMD_VLE);
   8804 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8805 				    htole16(vlan_get_tag(m0));
   8806 			} else
   8807 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8808 
   8809 			dcmdlen = 0;
   8810 		} else {
   8811 			/* Set up an advanced data descriptor */
   8812 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8813 			    htole64(dmamap->dm_segs[0].ds_addr);
   8814 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8815 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8816 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8817 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8818 			    htole32(fields);
   8819 			DPRINTF(sc, WM_DEBUG_TX,
   8820 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8821 				device_xname(sc->sc_dev), nexttx,
   8822 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8823 			DPRINTF(sc, WM_DEBUG_TX,
   8824 			    ("\t 0x%08x%08x\n", fields,
   8825 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8826 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8827 		}
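        		/*
        		 * dcmdlen is OR'd into every remaining segment
        		 * descriptor below: zero leaves them in legacy format,
        		 * while NQTX_DTYP_D | NQTX_CMD_DEXT marks them as
        		 * advanced data descriptors, matching the first one.
        		 */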
   8828 
   8829 		lasttx = nexttx;
   8830 		nexttx = WM_NEXTTX(txq, nexttx);
   8831 		/*
   8832 		 * Fill in the next descriptors. Legacy or advanced format
   8833 		 * is the same here.
   8834 		 */
   8835 		for (seg = 1; seg < dmamap->dm_nsegs;
   8836 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8837 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8838 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8839 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8840 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8841 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8842 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8843 			lasttx = nexttx;
   8844 
   8845 			DPRINTF(sc, WM_DEBUG_TX,
   8846 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8847 				device_xname(sc->sc_dev), nexttx,
   8848 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8849 				dmamap->dm_segs[seg].ds_len));
   8850 		}
   8851 
   8852 		KASSERT(lasttx != -1);
   8853 
   8854 		/*
   8855 		 * Set up the command byte on the last descriptor of
   8856 		 * the packet. If we're in the interrupt delay window,
   8857 		 * delay the interrupt.
   8858 		 */
   8859 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8860 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8861 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8862 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8863 
   8864 		txs->txs_lastdesc = lasttx;
   8865 
   8866 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8867 		    device_xname(sc->sc_dev),
   8868 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8869 
   8870 		/* Sync the descriptors we're using. */
   8871 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8872 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8873 
   8874 		/* Give the packet to the chip. */
   8875 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8876 		sent = true;
   8877 
   8878 		DPRINTF(sc, WM_DEBUG_TX,
   8879 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8880 
   8881 		DPRINTF(sc, WM_DEBUG_TX,
   8882 		    ("%s: TX: finished transmitting packet, job %d\n",
   8883 			device_xname(sc->sc_dev), txq->txq_snext));
   8884 
   8885 		/* Advance the tx pointer. */
   8886 		txq->txq_free -= txs->txs_ndesc;
   8887 		txq->txq_next = nexttx;
   8888 
   8889 		txq->txq_sfree--;
   8890 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8891 
   8892 		/* Pass the packet to any BPF listeners. */
   8893 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8894 	}
   8895 
   8896 	if (m0 != NULL) {
   8897 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8898 		WM_Q_EVCNT_INCR(txq, descdrop);
   8899 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8900 			__func__));
   8901 		m_freem(m0);
   8902 	}
   8903 
   8904 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8905 		/* No more slots; notify upper layer. */
   8906 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8907 	}
   8908 
   8909 	if (sent) {
   8910 		/* Set a watchdog timer in case the chip flakes out. */
   8911 		txq->txq_lastsent = time_uptime;
   8912 		txq->txq_sending = true;
   8913 	}
   8914 }
   8915 
   8916 static void
   8917 wm_deferred_start_locked(struct wm_txqueue *txq)
   8918 {
   8919 	struct wm_softc *sc = txq->txq_sc;
   8920 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8921 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8922 	int qid = wmq->wmq_id;
   8923 
   8924 	KASSERT(mutex_owned(txq->txq_lock));
   8925 
   8926 	if (txq->txq_stopping) {
   8927 		mutex_exit(txq->txq_lock);
   8928 		return;
   8929 	}
   8930 
   8931 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8932 		/* XXX needed for ALTQ or single-CPU systems */
   8933 		if (qid == 0)
   8934 			wm_nq_start_locked(ifp);
   8935 		wm_nq_transmit_locked(ifp, txq);
   8936 	} else {
   8937 		/* XXX needed for ALTQ or single-CPU systems */
   8938 		if (qid == 0)
   8939 			wm_start_locked(ifp);
   8940 		wm_transmit_locked(ifp, txq);
   8941 	}
   8942 }
   8943 
   8944 /* Interrupt */
   8945 
   8946 /*
   8947  * wm_txeof:
   8948  *
   8949  *	Helper; handle transmit interrupts.
   8950  */
   8951 static bool
   8952 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8953 {
   8954 	struct wm_softc *sc = txq->txq_sc;
   8955 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8956 	struct wm_txsoft *txs;
   8957 	int count = 0;
   8958 	int i;
   8959 	uint8_t status;
   8960 	bool more = false;
   8961 
   8962 	KASSERT(mutex_owned(txq->txq_lock));
   8963 
   8964 	if (txq->txq_stopping)
   8965 		return false;
   8966 
   8967 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
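        	/*
        	 * Clearing WM_TXQ_NO_SPACE here is safe even if the ring is
        	 * still full: the sender re-checks the free descriptor count
        	 * and sets the flag again before giving up.
        	 */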
   8968 
   8969 	/*
   8970 	 * Go through the Tx list and free mbufs for those
   8971 	 * frames which have been transmitted.
   8972 	 */
   8973 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8974 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8975 		txs = &txq->txq_soft[i];
   8976 
   8977 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8978 			device_xname(sc->sc_dev), i));
   8979 
   8980 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8981 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8982 
   8983 		status =
   8984 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8985 		if ((status & WTX_ST_DD) == 0) {
   8986 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8987 			    BUS_DMASYNC_PREREAD);
   8988 			break;
   8989 		}
   8990 
   8991 		if (limit-- == 0) {
   8992 			more = true;
   8993 			DPRINTF(sc, WM_DEBUG_TX,
   8994 			    ("%s: TX: loop limited, job %d is not processed\n",
   8995 				device_xname(sc->sc_dev), i));
   8996 			break;
   8997 		}
   8998 
   8999 		count++;
   9000 		DPRINTF(sc, WM_DEBUG_TX,
   9001 		    ("%s: TX: job %d done: descs %d..%d\n",
   9002 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   9003 		    txs->txs_lastdesc));
   9004 
   9005 		/*
   9006 		 * XXX We should probably be using the statistics
   9007 		 * XXX registers, but I don't know if they exist
   9008 		 * XXX on chips before the i82544.
   9009 		 */
   9010 
   9011 #ifdef WM_EVENT_COUNTERS
   9012 		if (status & WTX_ST_TU)
   9013 			WM_Q_EVCNT_INCR(txq, underrun);
   9014 #endif /* WM_EVENT_COUNTERS */
   9015 
   9016 		/*
   9017 		 * Documents for the 82574 and newer say the status field
   9018 		 * has neither an EC (Excessive Collision) bit nor an LC
   9019 		 * (Late Collision) bit; both are reserved. See the "PCIe GbE
   9020 		 * Controller Open Source Software Developer's Manual", the
   9021 		 * 82574 datasheet, and newer.
   9022 		 *
   9023 		 * XXX The LC bit was set on an I218 even on full-duplex
   9024 		 * media, so the bit might mean something else (no document).
   9025 		 */
   9026 
   9027 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   9028 		    && ((sc->sc_type < WM_T_82574)
   9029 			|| (sc->sc_type == WM_T_80003))) {
   9030 			if_statinc(ifp, if_oerrors);
   9031 			if (status & WTX_ST_LC)
   9032 				log(LOG_WARNING, "%s: late collision\n",
   9033 				    device_xname(sc->sc_dev));
   9034 			else if (status & WTX_ST_EC) {
   9035 				if_statadd(ifp, if_collisions,
   9036 				    TX_COLLISION_THRESHOLD + 1);
   9037 				log(LOG_WARNING, "%s: excessive collisions\n",
   9038 				    device_xname(sc->sc_dev));
   9039 			}
   9040 		} else
   9041 			if_statinc(ifp, if_opackets);
   9042 
   9043 		txq->txq_packets++;
   9044 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   9045 
   9046 		txq->txq_free += txs->txs_ndesc;
   9047 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   9048 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   9049 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   9050 		m_freem(txs->txs_mbuf);
   9051 		txs->txs_mbuf = NULL;
   9052 	}
   9053 
   9054 	/* Update the dirty transmit buffer pointer. */
   9055 	txq->txq_sdirty = i;
   9056 	DPRINTF(sc, WM_DEBUG_TX,
   9057 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   9058 
   9059 	if (count != 0)
   9060 		rnd_add_uint32(&sc->rnd_source, count);
   9061 
   9062 	/*
   9063 	 * If there are no more pending transmissions, cancel the watchdog
   9064 	 * timer.
   9065 	 */
   9066 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   9067 		txq->txq_sending = false;
   9068 
   9069 	return more;
   9070 }
   9071 
   9072 static inline uint32_t
   9073 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   9074 {
   9075 	struct wm_softc *sc = rxq->rxq_sc;
   9076 
   9077 	if (sc->sc_type == WM_T_82574)
   9078 		return EXTRXC_STATUS(
   9079 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9080 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9081 		return NQRXC_STATUS(
   9082 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9083 	else
   9084 		return rxq->rxq_descs[idx].wrx_status;
   9085 }
   9086 
   9087 static inline uint32_t
   9088 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   9089 {
   9090 	struct wm_softc *sc = rxq->rxq_sc;
   9091 
   9092 	if (sc->sc_type == WM_T_82574)
   9093 		return EXTRXC_ERROR(
   9094 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
   9095 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9096 		return NQRXC_ERROR(
   9097 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
   9098 	else
   9099 		return rxq->rxq_descs[idx].wrx_errors;
   9100 }
   9101 
   9102 static inline uint16_t
   9103 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   9104 {
   9105 	struct wm_softc *sc = rxq->rxq_sc;
   9106 
   9107 	if (sc->sc_type == WM_T_82574)
   9108 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   9109 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9110 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   9111 	else
   9112 		return rxq->rxq_descs[idx].wrx_special;
   9113 }
   9114 
   9115 static inline int
   9116 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   9117 {
   9118 	struct wm_softc *sc = rxq->rxq_sc;
   9119 
   9120 	if (sc->sc_type == WM_T_82574)
   9121 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   9122 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9123 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   9124 	else
   9125 		return rxq->rxq_descs[idx].wrx_len;
   9126 }
   9127 
   9128 #ifdef WM_DEBUG
   9129 static inline uint32_t
   9130 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   9131 {
   9132 	struct wm_softc *sc = rxq->rxq_sc;
   9133 
   9134 	if (sc->sc_type == WM_T_82574)
   9135 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   9136 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9137 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   9138 	else
   9139 		return 0;
   9140 }
   9141 
   9142 static inline uint8_t
   9143 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   9144 {
   9145 	struct wm_softc *sc = rxq->rxq_sc;
   9146 
   9147 	if (sc->sc_type == WM_T_82574)
   9148 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   9149 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9150 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   9151 	else
   9152 		return 0;
   9153 }
   9154 #endif /* WM_DEBUG */
   9155 
   9156 static inline bool
   9157 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   9158     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9159 {
   9160 
   9161 	if (sc->sc_type == WM_T_82574)
   9162 		return (status & ext_bit) != 0;
   9163 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9164 		return (status & nq_bit) != 0;
   9165 	else
   9166 		return (status & legacy_bit) != 0;
   9167 }
   9168 
   9169 static inline bool
   9170 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   9171     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   9172 {
   9173 
   9174 	if (sc->sc_type == WM_T_82574)
   9175 		return (error & ext_bit) != 0;
   9176 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   9177 		return (error & nq_bit) != 0;
   9178 	else
   9179 		return (error & legacy_bit) != 0;
   9180 }
   9181 
   9182 static inline bool
   9183 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   9184 {
   9185 
   9186 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9187 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   9188 		return true;
   9189 	else
   9190 		return false;
   9191 }
   9192 
   9193 static inline bool
   9194 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   9195 {
   9196 	struct wm_softc *sc = rxq->rxq_sc;
   9197 
   9198 	/* XXX missing error bit for newqueue? */
   9199 	if (wm_rxdesc_is_set_error(sc, errors,
   9200 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   9201 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   9202 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   9203 		NQRXC_ERROR_RXE)) {
   9204 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   9205 		    EXTRXC_ERROR_SE, 0))
   9206 			log(LOG_WARNING, "%s: symbol error\n",
   9207 			    device_xname(sc->sc_dev));
   9208 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   9209 		    EXTRXC_ERROR_SEQ, 0))
   9210 			log(LOG_WARNING, "%s: receive sequence error\n",
   9211 			    device_xname(sc->sc_dev));
   9212 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   9213 		    EXTRXC_ERROR_CE, 0))
   9214 			log(LOG_WARNING, "%s: CRC error\n",
   9215 			    device_xname(sc->sc_dev));
   9216 		return true;
   9217 	}
   9218 
   9219 	return false;
   9220 }
   9221 
   9222 static inline bool
   9223 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   9224 {
   9225 	struct wm_softc *sc = rxq->rxq_sc;
   9226 
   9227 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   9228 		NQRXC_STATUS_DD)) {
   9229 		/* We have processed all of the receive descriptors. */
   9230 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   9231 		return false;
   9232 	}
   9233 
   9234 	return true;
   9235 }
   9236 
   9237 static inline bool
   9238 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   9239     uint16_t vlantag, struct mbuf *m)
   9240 {
   9241 
   9242 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   9243 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   9244 		vlan_set_tag(m, le16toh(vlantag));
   9245 	}
   9246 
   9247 	return true;
   9248 }
   9249 
   9250 static inline void
   9251 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   9252     uint32_t errors, struct mbuf *m)
   9253 {
   9254 	struct wm_softc *sc = rxq->rxq_sc;
   9255 
   9256 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   9257 		if (wm_rxdesc_is_set_status(sc, status,
   9258 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   9259 			WM_Q_EVCNT_INCR(rxq, ipsum);
   9260 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   9261 			if (wm_rxdesc_is_set_error(sc, errors,
   9262 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   9263 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   9264 		}
   9265 		if (wm_rxdesc_is_set_status(sc, status,
   9266 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   9267 			/*
   9268 			 * Note: we don't know if this was TCP or UDP,
   9269 			 * so we just set both bits, and expect the
   9270 			 * upper layers to deal.
   9271 			 */
   9272 			WM_Q_EVCNT_INCR(rxq, tusum);
   9273 			m->m_pkthdr.csum_flags |=
   9274 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   9275 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   9276 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   9277 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   9278 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   9279 		}
   9280 	}
   9281 }
   9282 
   9283 /*
   9284  * wm_rxeof:
   9285  *
   9286  *	Helper; handle receive interrupts.
   9287  */
   9288 static bool
   9289 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   9290 {
   9291 	struct wm_softc *sc = rxq->rxq_sc;
   9292 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9293 	struct wm_rxsoft *rxs;
   9294 	struct mbuf *m;
   9295 	int i, len;
   9296 	int count = 0;
   9297 	uint32_t status, errors;
   9298 	uint16_t vlantag;
   9299 	bool more = false;
   9300 
   9301 	KASSERT(mutex_owned(rxq->rxq_lock));
   9302 
   9303 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   9304 		rxs = &rxq->rxq_soft[i];
   9305 
   9306 		DPRINTF(sc, WM_DEBUG_RX,
   9307 		    ("%s: RX: checking descriptor %d\n",
   9308 			device_xname(sc->sc_dev), i));
   9309 		wm_cdrxsync(rxq, i,
   9310 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   9311 
   9312 		status = wm_rxdesc_get_status(rxq, i);
   9313 		errors = wm_rxdesc_get_errors(rxq, i);
   9314 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   9315 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   9316 #ifdef WM_DEBUG
   9317 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   9318 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   9319 #endif
   9320 
   9321 		if (!wm_rxdesc_dd(rxq, i, status)) {
   9322 			break;
   9323 		}
   9324 
   9325 		if (limit-- == 0) {
   9326 			more = true;
   9327 			DPRINTF(sc, WM_DEBUG_RX,
   9328 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   9329 				device_xname(sc->sc_dev), i));
   9330 			break;
   9331 		}
   9332 
   9333 		count++;
   9334 		if (__predict_false(rxq->rxq_discard)) {
   9335 			DPRINTF(sc, WM_DEBUG_RX,
   9336 			    ("%s: RX: discarding contents of descriptor %d\n",
   9337 				device_xname(sc->sc_dev), i));
   9338 			wm_init_rxdesc(rxq, i);
   9339 			if (wm_rxdesc_is_eop(rxq, status)) {
   9340 				/* Reset our state. */
   9341 				DPRINTF(sc, WM_DEBUG_RX,
   9342 				    ("%s: RX: resetting rxdiscard -> 0\n",
   9343 					device_xname(sc->sc_dev)));
   9344 				rxq->rxq_discard = 0;
   9345 			}
   9346 			continue;
   9347 		}
   9348 
   9349 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9350 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   9351 
   9352 		m = rxs->rxs_mbuf;
   9353 
   9354 		/*
   9355 		 * Add a new receive buffer to the ring, unless of
   9356 		 * course the length is zero. Treat the latter as a
   9357 		 * failed mapping.
   9358 		 */
   9359 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   9360 			/*
   9361 			 * Failed, throw away what we've done so
   9362 			 * far, and discard the rest of the packet.
   9363 			 */
   9364 			if_statinc(ifp, if_ierrors);
   9365 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   9366 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9367 			wm_init_rxdesc(rxq, i);
   9368 			if (!wm_rxdesc_is_eop(rxq, status))
   9369 				rxq->rxq_discard = 1;
   9370 			if (rxq->rxq_head != NULL)
   9371 				m_freem(rxq->rxq_head);
   9372 			WM_RXCHAIN_RESET(rxq);
   9373 			DPRINTF(sc, WM_DEBUG_RX,
   9374 			    ("%s: RX: Rx buffer allocation failed, "
   9375 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9376 				rxq->rxq_discard ? " (discard)" : ""));
   9377 			continue;
   9378 		}
   9379 
   9380 		m->m_len = len;
   9381 		rxq->rxq_len += len;
   9382 		DPRINTF(sc, WM_DEBUG_RX,
   9383 		    ("%s: RX: buffer at %p len %d\n",
   9384 			device_xname(sc->sc_dev), m->m_data, len));
   9385 
   9386 		/* If this is not the end of the packet, keep looking. */
   9387 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9388 			WM_RXCHAIN_LINK(rxq, m);
   9389 			DPRINTF(sc, WM_DEBUG_RX,
   9390 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9391 				device_xname(sc->sc_dev), rxq->rxq_len));
   9392 			continue;
   9393 		}
   9394 
   9395 		/*
   9396 		 * Okay, we have the entire packet now. The chip is
   9397 		 * configured to include the FCS (not all chips can be
   9398 		 * configured to strip it), so normally we must trim it.
   9399 		 * Exceptions: on I35[04] and I21[01] an erratum leaves
   9400 		 * RCTL_SECRC always set, so the FCS is already stripped;
   9401 		 * PCH2 and newer chips also exclude the FCS with jumbo
   9402 		 * frames, working around another erratum. When trimming,
   9403 		 * we may need to shorten the previous mbuf in the chain
   9404 		 * if the last mbuf is too short.
   9405 		 */
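        		/*
        		 * For example (illustrative): if the last mbuf of the
        		 * chain holds only 2 of the 4 FCS bytes, that mbuf is
        		 * zeroed and the other 2 FCS bytes are trimmed from
        		 * the end of the previous mbuf.
        		 */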
   9406 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
   9407 			if (m->m_len < ETHER_CRC_LEN) {
   9408 				rxq->rxq_tail->m_len
   9409 				    -= (ETHER_CRC_LEN - m->m_len);
   9410 				m->m_len = 0;
   9411 			} else
   9412 				m->m_len -= ETHER_CRC_LEN;
   9413 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9414 		} else
   9415 			len = rxq->rxq_len;
   9416 
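        		/*
        		 * Link the last mbuf, NULL-terminate the chain, then
        		 * take the completed chain and reset our chain state.
        		 */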
   9417 		WM_RXCHAIN_LINK(rxq, m);
   9418 
   9419 		*rxq->rxq_tailp = NULL;
   9420 		m = rxq->rxq_head;
   9421 
   9422 		WM_RXCHAIN_RESET(rxq);
   9423 
   9424 		DPRINTF(sc, WM_DEBUG_RX,
   9425 		    ("%s: RX: have entire packet, len -> %d\n",
   9426 			device_xname(sc->sc_dev), len));
   9427 
   9428 		/* If an error occurred, update stats and drop the packet. */
   9429 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9430 			m_freem(m);
   9431 			continue;
   9432 		}
   9433 
   9434 		/* No errors.  Receive the packet. */
   9435 		m_set_rcvif(m, ifp);
   9436 		m->m_pkthdr.len = len;
   9437 		/*
   9438 		 * TODO
   9439 		 * We should save the rsshash and rsstype in this mbuf.
   9440 		 */
   9441 		DPRINTF(sc, WM_DEBUG_RX,
   9442 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9443 			device_xname(sc->sc_dev), rsstype, rsshash));
   9444 
   9445 		/*
   9446 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9447 		 * for us.  Associate the tag with the packet.
   9448 		 */
   9449 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9450 			continue;
   9451 
   9452 		/* Set up checksum info for this packet. */
   9453 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9454 
   9455 		rxq->rxq_packets++;
   9456 		rxq->rxq_bytes += len;
   9457 		/* Pass it on. */
   9458 		if_percpuq_enqueue(sc->sc_ipq, m);
   9459 
   9460 		if (rxq->rxq_stopping)
   9461 			break;
   9462 	}
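        	/* Remember where to resume on the next call. */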
   9463 	rxq->rxq_ptr = i;
   9464 
   9465 	if (count != 0)
   9466 		rnd_add_uint32(&sc->rnd_source, count);
   9467 
   9468 	DPRINTF(sc, WM_DEBUG_RX,
   9469 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9470 
   9471 	return more;
   9472 }
   9473 
   9474 /*
   9475  * wm_linkintr_gmii:
   9476  *
   9477  *	Helper; handle link interrupts for GMII.
   9478  */
   9479 static void
   9480 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9481 {
   9482 	device_t dev = sc->sc_dev;
   9483 	uint32_t status, reg;
   9484 	bool link;
   9485 	int rv;
   9486 
   9487 	KASSERT(WM_CORE_LOCKED(sc));
   9488 
   9489 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9490 		__func__));
   9491 
   9492 	if ((icr & ICR_LSC) == 0) {
   9493 		if (icr & ICR_RXSEQ)
   9494 			DPRINTF(sc, WM_DEBUG_LINK,
   9495 			    ("%s: LINK Receive sequence error\n",
   9496 				device_xname(dev)));
   9497 		return;
   9498 	}
   9499 
   9500 	/* Link status changed */
   9501 	status = CSR_READ(sc, WMREG_STATUS);
   9502 	link = status & STATUS_LU;
   9503 	if (link) {
   9504 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9505 			device_xname(dev),
   9506 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9507 		if (wm_phy_need_linkdown_discard(sc)) {
   9508 			DPRINTF(sc, WM_DEBUG_LINK,
   9509 			    ("%s: linkintr: Clear linkdown discard flag\n",
   9510 				device_xname(dev)));
   9511 			wm_clear_linkdown_discard(sc);
   9512 		}
   9513 	} else {
   9514 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9515 			device_xname(dev)));
   9516 		if (wm_phy_need_linkdown_discard(sc)) {
   9517 			DPRINTF(sc, WM_DEBUG_LINK,
   9518 			    ("%s: linkintr: Set linkdown discard flag\n",
   9519 				device_xname(dev)));
   9520 			wm_set_linkdown_discard(sc);
   9521 		}
   9522 	}
   9523 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9524 		wm_gig_downshift_workaround_ich8lan(sc);
   9525 
   9526 	if ((sc->sc_type == WM_T_ICH8)
   9527 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9528 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9529 	}
   9530 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9531 		device_xname(dev)));
   9532 	mii_pollstat(&sc->sc_mii);
   9533 	if (sc->sc_type == WM_T_82543) {
   9534 		int miistatus, active;
   9535 
   9536 		/*
   9537 		 * With the 82543, we need to force the speed and
   9538 		 * duplex settings on the MAC to match the PHY's
   9539 		 * speed and duplex configuration.
   9540 		 */
   9541 		miistatus = sc->sc_mii.mii_media_status;
   9542 
   9543 		if (miistatus & IFM_ACTIVE) {
   9544 			active = sc->sc_mii.mii_media_active;
   9545 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9546 			switch (IFM_SUBTYPE(active)) {
   9547 			case IFM_10_T:
   9548 				sc->sc_ctrl |= CTRL_SPEED_10;
   9549 				break;
   9550 			case IFM_100_TX:
   9551 				sc->sc_ctrl |= CTRL_SPEED_100;
   9552 				break;
   9553 			case IFM_1000_T:
   9554 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9555 				break;
   9556 			default:
   9557 				/*
   9558 				 * Fiber?
   9559 				 * Should not enter here.
   9560 				 */
   9561 				device_printf(dev, "unknown media (%x)\n",
   9562 				    active);
   9563 				break;
   9564 			}
   9565 			if (active & IFM_FDX)
   9566 				sc->sc_ctrl |= CTRL_FD;
   9567 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9568 		}
   9569 	} else if (sc->sc_type == WM_T_PCH) {
   9570 		wm_k1_gig_workaround_hv(sc,
   9571 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9572 	}
   9573 
   9574 	/*
   9575 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9576 	 * aggressive resulting in many collisions. To avoid this, increase
   9577 	 * the IPG and reduce Rx latency in the PHY.
   9578 	 */
   9579 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9580 	    && link) {
   9581 		uint32_t tipg_reg;
   9582 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9583 		bool fdx;
   9584 		uint16_t emi_addr, emi_val;
   9585 
   9586 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9587 		tipg_reg &= ~TIPG_IPGT_MASK;
   9588 		fdx = status & STATUS_FD;
   9589 
   9590 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9591 			tipg_reg |= 0xff;
   9592 			/* Reduce Rx latency in analog PHY */
   9593 			emi_val = 0;
   9594 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9595 		    fdx && speed != STATUS_SPEED_1000) {
   9596 			tipg_reg |= 0xc;
   9597 			emi_val = 1;
   9598 		} else {
   9599 			/* Roll back to the default values */
   9600 			tipg_reg |= 0x08;
   9601 			emi_val = 1;
   9602 		}
   9603 
   9604 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9605 
   9606 		rv = sc->phy.acquire(sc);
   9607 		if (rv)
   9608 			return;
   9609 
   9610 		if (sc->sc_type == WM_T_PCH2)
   9611 			emi_addr = I82579_RX_CONFIG;
   9612 		else
   9613 			emi_addr = I217_RX_CONFIG;
   9614 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9615 
   9616 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9617 			uint16_t phy_reg;
   9618 
   9619 			sc->phy.readreg_locked(dev, 2,
   9620 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9621 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9622 			if (speed == STATUS_SPEED_100
   9623 			    || speed == STATUS_SPEED_10)
   9624 				phy_reg |= 0x3e8;
   9625 			else
   9626 				phy_reg |= 0xfa;
   9627 			sc->phy.writereg_locked(dev, 2,
   9628 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9629 
   9630 			if (speed == STATUS_SPEED_1000) {
   9631 				sc->phy.readreg_locked(dev, 2,
   9632 				    HV_PM_CTRL, &phy_reg);
   9633 
   9634 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9635 
   9636 				sc->phy.writereg_locked(dev, 2,
   9637 				    HV_PM_CTRL, phy_reg);
   9638 			}
   9639 		}
   9640 		sc->phy.release(sc);
   9641 
   9642 		if (rv)
   9643 			return;
   9644 
   9645 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9646 			uint16_t data, ptr_gap;
   9647 
   9648 			if (speed == STATUS_SPEED_1000) {
   9649 				rv = sc->phy.acquire(sc);
   9650 				if (rv)
   9651 					return;
   9652 
   9653 				rv = sc->phy.readreg_locked(dev, 2,
   9654 				    I82579_UNKNOWN1, &data);
   9655 				if (rv) {
   9656 					sc->phy.release(sc);
   9657 					return;
   9658 				}
   9659 
   9660 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9661 				if (ptr_gap < 0x18) {
   9662 					data &= ~(0x3ff << 2);
   9663 					data |= (0x18 << 2);
   9664 					rv = sc->phy.writereg_locked(dev,
   9665 					    2, I82579_UNKNOWN1, data);
   9666 				}
   9667 				sc->phy.release(sc);
   9668 				if (rv)
   9669 					return;
   9670 			} else {
   9671 				rv = sc->phy.acquire(sc);
   9672 				if (rv)
   9673 					return;
   9674 
   9675 				rv = sc->phy.writereg_locked(dev, 2,
   9676 				    I82579_UNKNOWN1, 0xc023);
   9677 				sc->phy.release(sc);
   9678 				if (rv)
   9679 					return;
   9680 
   9681 			}
   9682 		}
   9683 	}
   9684 
   9685 	/*
   9686 	 * I217 packet loss issue:
   9687 	 * Ensure that the FEXTNVM4 beacon duration is set correctly
   9688 	 * on power up.
   9689 	 * Set the beacon duration for I217 to 8 usec.
   9690 	 */
   9691 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9692 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9693 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9694 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9695 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9696 	}
   9697 
   9698 	/* Work-around I218 hang issue */
   9699 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9700 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9701 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9702 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9703 		wm_k1_workaround_lpt_lp(sc, link);
   9704 
   9705 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9706 		/*
   9707 		 * Set platform power management values for Latency
   9708 		 * Tolerance Reporting (LTR)
   9709 		 */
   9710 		wm_platform_pm_pch_lpt(sc,
   9711 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9712 	}
   9713 
   9714 	/* Clear link partner's EEE ability */
   9715 	sc->eee_lp_ability = 0;
   9716 
   9717 	/* FEXTNVM6 K1-off workaround */
   9718 	if (sc->sc_type == WM_T_PCH_SPT) {
   9719 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9720 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9721 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9722 		else
   9723 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9724 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9725 	}
   9726 
   9727 	if (!link)
   9728 		return;
   9729 
   9730 	switch (sc->sc_type) {
   9731 	case WM_T_PCH2:
   9732 		wm_k1_workaround_lv(sc);
   9733 		/* FALLTHROUGH */
   9734 	case WM_T_PCH:
   9735 		if (sc->sc_phytype == WMPHY_82578)
   9736 			wm_link_stall_workaround_hv(sc);
   9737 		break;
   9738 	default:
   9739 		break;
   9740 	}
   9741 
   9742 	/* Enable/Disable EEE after link up */
   9743 	if (sc->sc_phytype > WMPHY_82579)
   9744 		wm_set_eee_pchlan(sc);
   9745 }
   9746 
   9747 /*
   9748  * wm_linkintr_tbi:
   9749  *
   9750  *	Helper; handle link interrupts for TBI mode.
   9751  */
   9752 static void
   9753 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9754 {
   9755 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9756 	uint32_t status;
   9757 
   9758 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9759 		__func__));
   9760 
   9761 	status = CSR_READ(sc, WMREG_STATUS);
   9762 	if (icr & ICR_LSC) {
   9763 		wm_check_for_link(sc);
   9764 		if (status & STATUS_LU) {
   9765 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9766 				device_xname(sc->sc_dev),
   9767 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9768 			/*
   9769 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
   9770 			 * automatically, so re-read our cached sc_ctrl.
   9771 			 */
   9772 
   9773 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9774 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9775 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9776 			if (status & STATUS_FD)
   9777 				sc->sc_tctl |=
   9778 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9779 			else
   9780 				sc->sc_tctl |=
   9781 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9782 			if (sc->sc_ctrl & CTRL_TFCE)
   9783 				sc->sc_fcrtl |= FCRTL_XONE;
   9784 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9785 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9786 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9787 			sc->sc_tbi_linkup = 1;
   9788 			if_link_state_change(ifp, LINK_STATE_UP);
   9789 		} else {
   9790 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9791 				device_xname(sc->sc_dev)));
   9792 			sc->sc_tbi_linkup = 0;
   9793 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9794 		}
   9795 		/* Update LED */
   9796 		wm_tbi_serdes_set_linkled(sc);
   9797 	} else if (icr & ICR_RXSEQ)
   9798 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9799 			device_xname(sc->sc_dev)));
   9800 }
   9801 
   9802 /*
   9803  * wm_linkintr_serdes:
   9804  *
   9805  *	Helper; handle link interrupts for SERDES mode.
   9806  */
   9807 static void
   9808 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9809 {
   9810 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9811 	struct mii_data *mii = &sc->sc_mii;
   9812 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9813 	uint32_t pcs_adv, pcs_lpab, reg;
   9814 
   9815 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9816 		__func__));
   9817 
   9818 	if (icr & ICR_LSC) {
   9819 		/* Check PCS */
   9820 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9821 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9822 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9823 				device_xname(sc->sc_dev)));
   9824 			mii->mii_media_status |= IFM_ACTIVE;
   9825 			sc->sc_tbi_linkup = 1;
   9826 			if_link_state_change(ifp, LINK_STATE_UP);
   9827 		} else {
   9828 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9829 				device_xname(sc->sc_dev)));
   9830 			mii->mii_media_status |= IFM_NONE;
   9831 			sc->sc_tbi_linkup = 0;
   9832 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9833 			wm_tbi_serdes_set_linkled(sc);
   9834 			return;
   9835 		}
   9836 		mii->mii_media_active |= IFM_1000_SX;
   9837 		if ((reg & PCS_LSTS_FDX) != 0)
   9838 			mii->mii_media_active |= IFM_FDX;
   9839 		else
   9840 			mii->mii_media_active |= IFM_HDX;
   9841 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9842 			/* Check flow */
   9843 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9844 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9845 				DPRINTF(sc, WM_DEBUG_LINK,
   9846 				    ("XXX LINKOK but not ACOMP\n"));
   9847 				return;
   9848 			}
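        			/*
        			 * Resolve flow control per IEEE 802.3 Annex
        			 * 28B: both sides symmetric -> Tx+Rx pause;
        			 * we advertise asymmetric only while the
        			 * partner advertises both -> Tx pause; the
        			 * mirrored case -> Rx pause.
        			 */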
   9849 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9850 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9851 			DPRINTF(sc, WM_DEBUG_LINK,
   9852 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9853 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9854 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9855 				mii->mii_media_active |= IFM_FLOW
   9856 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9857 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9858 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9859 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9860 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9861 				mii->mii_media_active |= IFM_FLOW
   9862 				    | IFM_ETH_TXPAUSE;
   9863 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9864 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9865 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9866 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9867 				mii->mii_media_active |= IFM_FLOW
   9868 				    | IFM_ETH_RXPAUSE;
   9869 		}
   9870 		/* Update LED */
   9871 		wm_tbi_serdes_set_linkled(sc);
   9872 	} else
   9873 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9874 		    device_xname(sc->sc_dev)));
   9875 }
   9876 
   9877 /*
   9878  * wm_linkintr:
   9879  *
   9880  *	Helper; handle link interrupts.
   9881  */
   9882 static void
   9883 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9884 {
   9885 
   9886 	KASSERT(WM_CORE_LOCKED(sc));
   9887 
   9888 	if (sc->sc_flags & WM_F_HAS_MII)
   9889 		wm_linkintr_gmii(sc, icr);
   9890 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9891 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9892 		wm_linkintr_serdes(sc, icr);
   9893 	else
   9894 		wm_linkintr_tbi(sc, icr);
   9895 }
   9896 
   9897 
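        /*
         * wm_sched_handle_queue:
         *
         *	Defer Tx/Rx processing for the queue to either the per-device
         *	workqueue (enqueued on the current CPU) or a softint, depending
         *	on wmq_txrx_use_workqueue.
         */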
   9898 static inline void
   9899 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9900 {
   9901 
   9902 	if (wmq->wmq_txrx_use_workqueue)
   9903 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9904 	else
   9905 		softint_schedule(wmq->wmq_si);
   9906 }
   9907 
   9908 static inline void
   9909 wm_legacy_intr_disable(struct wm_softc *sc)
   9910 {
   9911 
   9912 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   9913 }
   9914 
   9915 static inline void
   9916 wm_legacy_intr_enable(struct wm_softc *sc)
   9917 {
   9918 
   9919 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   9920 }
   9921 
   9922 /*
   9923  * wm_intr_legacy:
   9924  *
   9925  *	Interrupt service routine for INTx and MSI.
   9926  */
   9927 static int
   9928 wm_intr_legacy(void *arg)
   9929 {
   9930 	struct wm_softc *sc = arg;
   9931 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9932 	struct wm_queue *wmq = &sc->sc_queue[0];
   9933 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9934 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9935 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9936 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9937 	uint32_t icr, rndval = 0;
   9938 	bool more = false;
   9939 
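        	/*
        	 * Reading ICR acknowledges (clears) the pending interrupt
        	 * causes. If none of the causes we enabled are set, the
        	 * (possibly shared) interrupt is not ours.
        	 */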
   9940 	icr = CSR_READ(sc, WMREG_ICR);
   9941 	if ((icr & sc->sc_icr) == 0)
   9942 		return 0;
   9943 
   9944 	DPRINTF(sc, WM_DEBUG_TX,
   9945 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9946 	if (rndval == 0)
   9947 		rndval = icr;
   9948 
   9949 	mutex_enter(txq->txq_lock);
   9950 
   9951 	if (txq->txq_stopping) {
   9952 		mutex_exit(txq->txq_lock);
   9953 		return 1;
   9954 	}
   9955 
   9956 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9957 	if (icr & ICR_TXDW) {
   9958 		DPRINTF(sc, WM_DEBUG_TX,
   9959 		    ("%s: TX: got TXDW interrupt\n",
   9960 			device_xname(sc->sc_dev)));
   9961 		WM_Q_EVCNT_INCR(txq, txdw);
   9962 	}
   9963 #endif
   9964 	if (txlimit > 0) {
   9965 		more |= wm_txeof(txq, txlimit);
   9966 		if (!IF_IS_EMPTY(&ifp->if_snd))
   9967 			more = true;
   9968 	} else
   9969 		more = true;
   9970 	mutex_exit(txq->txq_lock);
   9971 
   9972 	mutex_enter(rxq->rxq_lock);
   9973 
   9974 	if (rxq->rxq_stopping) {
   9975 		mutex_exit(rxq->rxq_lock);
   9976 		return 1;
   9977 	}
   9978 
   9979 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9980 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9981 		DPRINTF(sc, WM_DEBUG_RX,
   9982 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
   9983 			device_xname(sc->sc_dev),
   9984 			icr & (ICR_RXDMT0 | ICR_RXT0)));
   9985 		WM_Q_EVCNT_INCR(rxq, intr);
   9986 	}
   9987 #endif
   9988 	if (rxlimit > 0) {
   9989 		/*
   9990 		 * wm_rxeof() does *not* call upper layer functions directly,
   9991 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9992 		 * So we can call wm_rxeof() in interrupt context.
   9993 		 */
   9994 		more |= wm_rxeof(rxq, rxlimit);
   9995 	} else
   9996 		more = true;
   9997 
   9998 	mutex_exit(rxq->rxq_lock);
   9999 
   10000 	WM_CORE_LOCK(sc);
   10001 
   10002 	if (sc->sc_core_stopping) {
   10003 		WM_CORE_UNLOCK(sc);
   10004 		return 1;
   10005 	}
   10006 
   10007 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
   10008 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10009 		wm_linkintr(sc, icr);
   10010 	}
   10011 	if ((icr & ICR_GPI(0)) != 0)
   10012 		device_printf(sc->sc_dev, "got module interrupt\n");
   10013 
   10014 	WM_CORE_UNLOCK(sc);
   10015 
   10016 	if (icr & ICR_RXO) {
   10017 #if defined(WM_DEBUG)
   10018 		log(LOG_WARNING, "%s: Receive overrun\n",
   10019 		    device_xname(sc->sc_dev));
   10020 #endif /* defined(WM_DEBUG) */
   10021 	}
   10022 
   10023 	rnd_add_uint32(&sc->rnd_source, rndval);
   10024 
   10025 	if (more) {
   10026 		/* Try to get more packets going. */
   10027 		wm_legacy_intr_disable(sc);
   10028 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10029 		wm_sched_handle_queue(sc, wmq);
   10030 	}
   10031 
   10032 	return 1;
   10033 }
   10034 
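        /*
         * wm_txrxintr_disable:
         *
         *	Mask the queue's Tx/Rx interrupts. The mask register differs
         *	by chip: 82574 uses IMC, 82575 uses EIMC with per-queue EITR
         *	bits, and later MSI-X chips use EIMC with one bit per vector.
         */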
   10035 static inline void
   10036 wm_txrxintr_disable(struct wm_queue *wmq)
   10037 {
   10038 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10039 
   10040 	if (__predict_false(!wm_is_using_msix(sc))) {
   10041 		wm_legacy_intr_disable(sc);
   10042 		return;
   10043 	}
   10044 
   10045 	if (sc->sc_type == WM_T_82574)
   10046 		CSR_WRITE(sc, WMREG_IMC,
   10047 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   10048 	else if (sc->sc_type == WM_T_82575)
   10049 		CSR_WRITE(sc, WMREG_EIMC,
   10050 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10051 	else
   10052 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   10053 }
   10054 
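        /*
         * wm_txrxintr_enable:
         *
         *	Recompute the interrupt throttling parameters, then unmask the
         *	queue's Tx/Rx interrupts (same per-chip register layout as
         *	wm_txrxintr_disable()).
         */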
   10055 static inline void
   10056 wm_txrxintr_enable(struct wm_queue *wmq)
   10057 {
   10058 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   10059 
   10060 	wm_itrs_calculate(sc, wmq);
   10061 
   10062 	if (__predict_false(!wm_is_using_msix(sc))) {
   10063 		wm_legacy_intr_enable(sc);
   10064 		return;
   10065 	}
   10066 
   10067 	/*
   10068 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
   10069 	 * here. It does not matter which of RXQ(0) and RXQ(1) enables
   10070 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
   10071 	 * while its wm_handle_queue(wmq) is running.
   10072 	 */
   10073 	if (sc->sc_type == WM_T_82574)
   10074 		CSR_WRITE(sc, WMREG_IMS,
   10075 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   10076 	else if (sc->sc_type == WM_T_82575)
   10077 		CSR_WRITE(sc, WMREG_EIMS,
   10078 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   10079 	else
   10080 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   10081 }
   10082 
   10083 static int
   10084 wm_txrxintr_msix(void *arg)
   10085 {
   10086 	struct wm_queue *wmq = arg;
   10087 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10088 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10089 	struct wm_softc *sc = txq->txq_sc;
   10090 	u_int txlimit = sc->sc_tx_intr_process_limit;
   10091 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   10092 	bool txmore;
   10093 	bool rxmore;
   10094 
   10095 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   10096 
   10097 	DPRINTF(sc, WM_DEBUG_TX,
   10098 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   10099 
   10100 	wm_txrxintr_disable(wmq);
   10101 
   10102 	mutex_enter(txq->txq_lock);
   10103 
   10104 	if (txq->txq_stopping) {
   10105 		mutex_exit(txq->txq_lock);
   10106 		return 1;
   10107 	}
   10108 
   10109 	WM_Q_EVCNT_INCR(txq, txdw);
   10110 	if (txlimit > 0) {
   10111 		txmore = wm_txeof(txq, txlimit);
   10112 		/* wm_deferred_start() is done in wm_handle_queue(). */
   10113 	} else
   10114 		txmore = true;
   10115 	mutex_exit(txq->txq_lock);
   10116 
   10117 	DPRINTF(sc, WM_DEBUG_RX,
   10118 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   10119 	mutex_enter(rxq->rxq_lock);
   10120 
   10121 	if (rxq->rxq_stopping) {
   10122 		mutex_exit(rxq->rxq_lock);
   10123 		return 1;
   10124 	}
   10125 
   10126 	WM_Q_EVCNT_INCR(rxq, intr);
   10127 	if (rxlimit > 0) {
   10128 		rxmore = wm_rxeof(rxq, rxlimit);
   10129 	} else
   10130 		rxmore = true;
   10131 	mutex_exit(rxq->rxq_lock);
   10132 
   10133 	wm_itrs_writereg(sc, wmq);
   10134 
   10135 	if (txmore || rxmore) {
   10136 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10137 		wm_sched_handle_queue(sc, wmq);
   10138 	} else
   10139 		wm_txrxintr_enable(wmq);
   10140 
   10141 	return 1;
   10142 }
   10143 
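        /*
         * wm_handle_queue:
         *
         *	Softint/workqueue continuation of the Tx/Rx interrupt handlers:
         *	process completions using the deferred (non-interrupt) process
         *	limits and re-enable the queue's interrupts only when done.
         */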
   10144 static void
   10145 wm_handle_queue(void *arg)
   10146 {
   10147 	struct wm_queue *wmq = arg;
   10148 	struct wm_txqueue *txq = &wmq->wmq_txq;
   10149 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   10150 	struct wm_softc *sc = txq->txq_sc;
   10151 	u_int txlimit = sc->sc_tx_process_limit;
   10152 	u_int rxlimit = sc->sc_rx_process_limit;
   10153 	bool txmore;
   10154 	bool rxmore;
   10155 
   10156 	mutex_enter(txq->txq_lock);
   10157 	if (txq->txq_stopping) {
   10158 		mutex_exit(txq->txq_lock);
   10159 		return;
   10160 	}
   10161 	txmore = wm_txeof(txq, txlimit);
   10162 	wm_deferred_start_locked(txq);
   10163 	mutex_exit(txq->txq_lock);
   10164 
   10165 	mutex_enter(rxq->rxq_lock);
   10166 	if (rxq->rxq_stopping) {
   10167 		mutex_exit(rxq->rxq_lock);
   10168 		return;
   10169 	}
   10170 	WM_Q_EVCNT_INCR(rxq, defer);
   10171 	rxmore = wm_rxeof(rxq, rxlimit);
   10172 	mutex_exit(rxq->rxq_lock);
   10173 
   10174 	if (txmore || rxmore) {
   10175 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   10176 		wm_sched_handle_queue(sc, wmq);
   10177 	} else
   10178 		wm_txrxintr_enable(wmq);
   10179 }
   10180 
   10181 static void
   10182 wm_handle_queue_work(struct work *wk, void *context)
   10183 {
   10184 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   10185 
   10186 	/*
   10187 	 * An "enqueued" flag is not required here; the queue interrupt
        	 * stays masked, so this work item cannot be enqueued twice.
   10188 	 */
   10189 	wm_handle_queue(wmq);
   10190 }
   10191 
   10192 /*
   10193  * wm_linkintr_msix:
   10194  *
   10195  *	Interrupt service routine for link status change for MSI-X.
   10196  */
   10197 static int
   10198 wm_linkintr_msix(void *arg)
   10199 {
   10200 	struct wm_softc *sc = arg;
   10201 	uint32_t reg;
   10202 	bool has_rxo;
   10203 
   10204 	reg = CSR_READ(sc, WMREG_ICR);
   10205 	WM_CORE_LOCK(sc);
   10206 	DPRINTF(sc, WM_DEBUG_LINK,
   10207 	    ("%s: LINK: got link intr. ICR = %08x\n",
   10208 		device_xname(sc->sc_dev), reg));
   10209 
   10210 	if (sc->sc_core_stopping)
   10211 		goto out;
   10212 
   10213 	if ((reg & ICR_LSC) != 0) {
   10214 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   10215 		wm_linkintr(sc, ICR_LSC);
   10216 	}
   10217 	if ((reg & ICR_GPI(0)) != 0)
   10218 		device_printf(sc->sc_dev, "got module interrupt\n");
   10219 
   10220 	/*
   10221 	 * XXX 82574 MSI-X mode workaround
   10222 	 *
   10223 	 * In 82574 MSI-X mode, a receive overrun (RXO) delivers the
   10224 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
   10225 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
   10226 	 * interrupts by writing WMREG_ICS, to process the received packets.
   10227 	 */
   10228 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   10229 #if defined(WM_DEBUG)
   10230 		log(LOG_WARNING, "%s: Receive overrun\n",
   10231 		    device_xname(sc->sc_dev));
   10232 #endif /* defined(WM_DEBUG) */
   10233 
   10234 		has_rxo = true;
   10235 		/*
   10236 		 * The RXO interrupt fires at a very high rate when receive
   10237 		 * traffic is heavy, so we poll for ICR_OTHER just as we do
   10238 		 * for the Tx/Rx interrupts. ICR_OTHER is re-enabled at the
   10239 		 * end of wm_txrxintr_msix(), which is kicked by both the
   10240 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   10241 		 */
   10242 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   10243 
   10244 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   10245 	}
   10246 
   10249 out:
   10250 	WM_CORE_UNLOCK(sc);
   10251 
   10252 	if (sc->sc_type == WM_T_82574) {
   10253 		if (!has_rxo)
   10254 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   10255 		else
   10256 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   10257 	} else if (sc->sc_type == WM_T_82575)
   10258 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   10259 	else
   10260 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   10261 
   10262 	return 1;
   10263 }
   10264 
   10265 /*
   10266  * Media related.
   10267  * GMII, SGMII, TBI (and SERDES)
   10268  */
   10269 
   10270 /* Common */
   10271 
   10272 /*
   10273  * wm_tbi_serdes_set_linkled:
   10274  *
   10275  *	Update the link LED on TBI and SERDES devices.
   10276  */
   10277 static void
   10278 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   10279 {
   10280 
   10281 	if (sc->sc_tbi_linkup)
   10282 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   10283 	else
   10284 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   10285 
   10286 	/* 82540 or newer devices are active low */
   10287 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   10288 
   10289 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10290 }
   10291 
   10292 /* GMII related */
   10293 
   10294 /*
   10295  * wm_gmii_reset:
   10296  *
   10297  *	Reset the PHY.
   10298  */
   10299 static void
   10300 wm_gmii_reset(struct wm_softc *sc)
   10301 {
   10302 	uint32_t reg;
   10303 	int rv;
   10304 
   10305 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10306 		device_xname(sc->sc_dev), __func__));
   10307 
   10308 	rv = sc->phy.acquire(sc);
   10309 	if (rv != 0) {
   10310 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10311 		    __func__);
   10312 		return;
   10313 	}
   10314 
   10315 	switch (sc->sc_type) {
   10316 	case WM_T_82542_2_0:
   10317 	case WM_T_82542_2_1:
   10318 		/* null */
   10319 		break;
   10320 	case WM_T_82543:
   10321 		/*
   10322 		 * With 82543, we need to force speed and duplex on the MAC
   10323 		 * equal to what the PHY speed and duplex configuration is.
   10324 		 * In addition, we need to perform a hardware reset on the PHY
   10325 		 * to take it out of reset.
   10326 		 */
   10327 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10328 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10329 
   10330 		/* The PHY reset pin is active-low. */
   10331 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10332 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   10333 		    CTRL_EXT_SWDPIN(4));
   10334 		reg |= CTRL_EXT_SWDPIO(4);
   10335 
   10336 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10337 		CSR_WRITE_FLUSH(sc);
   10338 		delay(10*1000);
   10339 
   10340 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   10341 		CSR_WRITE_FLUSH(sc);
   10342 		delay(150);
   10343 #if 0
   10344 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   10345 #endif
   10346 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   10347 		break;
   10348 	case WM_T_82544:	/* Reset 10000us */
   10349 	case WM_T_82540:
   10350 	case WM_T_82545:
   10351 	case WM_T_82545_3:
   10352 	case WM_T_82546:
   10353 	case WM_T_82546_3:
   10354 	case WM_T_82541:
   10355 	case WM_T_82541_2:
   10356 	case WM_T_82547:
   10357 	case WM_T_82547_2:
   10358 	case WM_T_82571:	/* Reset 100us */
   10359 	case WM_T_82572:
   10360 	case WM_T_82573:
   10361 	case WM_T_82574:
   10362 	case WM_T_82575:
   10363 	case WM_T_82576:
   10364 	case WM_T_82580:
   10365 	case WM_T_I350:
   10366 	case WM_T_I354:
   10367 	case WM_T_I210:
   10368 	case WM_T_I211:
   10369 	case WM_T_82583:
   10370 	case WM_T_80003:
   10371 		/* Generic reset */
   10372 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10373 		CSR_WRITE_FLUSH(sc);
   10374 		delay(20000);
   10375 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10376 		CSR_WRITE_FLUSH(sc);
   10377 		delay(20000);
   10378 
   10379 		if ((sc->sc_type == WM_T_82541)
   10380 		    || (sc->sc_type == WM_T_82541_2)
   10381 		    || (sc->sc_type == WM_T_82547)
   10382 		    || (sc->sc_type == WM_T_82547_2)) {
   10383 			/* Workarounds for IGP are done in igp_reset() */
   10384 			/* XXX add code to set LED after phy reset */
   10385 		}
   10386 		break;
   10387 	case WM_T_ICH8:
   10388 	case WM_T_ICH9:
   10389 	case WM_T_ICH10:
   10390 	case WM_T_PCH:
   10391 	case WM_T_PCH2:
   10392 	case WM_T_PCH_LPT:
   10393 	case WM_T_PCH_SPT:
   10394 	case WM_T_PCH_CNP:
   10395 		/* Generic reset */
   10396 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10397 		CSR_WRITE_FLUSH(sc);
   10398 		delay(100);
   10399 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10400 		CSR_WRITE_FLUSH(sc);
   10401 		delay(150);
   10402 		break;
   10403 	default:
   10404 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10405 		    __func__);
   10406 		break;
   10407 	}
   10408 
   10409 	sc->phy.release(sc);
   10410 
   10411 	/* get_cfg_done */
   10412 	wm_get_cfg_done(sc);
   10413 
   10414 	/* Extra setup */
   10415 	switch (sc->sc_type) {
   10416 	case WM_T_82542_2_0:
   10417 	case WM_T_82542_2_1:
   10418 	case WM_T_82543:
   10419 	case WM_T_82544:
   10420 	case WM_T_82540:
   10421 	case WM_T_82545:
   10422 	case WM_T_82545_3:
   10423 	case WM_T_82546:
   10424 	case WM_T_82546_3:
   10425 	case WM_T_82541_2:
   10426 	case WM_T_82547_2:
   10427 	case WM_T_82571:
   10428 	case WM_T_82572:
   10429 	case WM_T_82573:
   10430 	case WM_T_82574:
   10431 	case WM_T_82583:
   10432 	case WM_T_82575:
   10433 	case WM_T_82576:
   10434 	case WM_T_82580:
   10435 	case WM_T_I350:
   10436 	case WM_T_I354:
   10437 	case WM_T_I210:
   10438 	case WM_T_I211:
   10439 	case WM_T_80003:
   10440 		/* Null */
   10441 		break;
   10442 	case WM_T_82541:
   10443 	case WM_T_82547:
   10444 		/* XXX Configure activity LED after PHY reset */
   10445 		break;
   10446 	case WM_T_ICH8:
   10447 	case WM_T_ICH9:
   10448 	case WM_T_ICH10:
   10449 	case WM_T_PCH:
   10450 	case WM_T_PCH2:
   10451 	case WM_T_PCH_LPT:
   10452 	case WM_T_PCH_SPT:
   10453 	case WM_T_PCH_CNP:
   10454 		wm_phy_post_reset(sc);
   10455 		break;
   10456 	default:
   10457 		panic("%s: unknown type\n", __func__);
   10458 		break;
   10459 	}
   10460 }
   10461 
   10462 /*
   10463  * Set up sc_phytype and mii_{read|write}reg.
   10464  *
   10465  *  To identify the PHY type, the correct read/write functions must be
   10466  * selected first, and selecting them requires the PCI ID or the MAC
   10467  * type, without accessing any PHY registers.
   10468  *
   10469  *  On the first call of this function, the PHY ID is not known yet, so
   10470  * check the PCI ID or the MAC type. The list of PCI IDs may not be
   10471  * complete, so the result might be incorrect.
   10472  *
   10473  *  On the second call, the PHY OUI and model are used to identify the
   10474  * PHY type. This might still be imperfect if an entry is missing from
   10475  * the comparison table, but it is better than the first call.
   10476  *
   10477  *  If the newly detected result differs from the previous assumption,
   10478  * a diagnostic message is printed.
   10479  */
   10480 static void
   10481 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10482     uint16_t phy_model)
   10483 {
   10484 	device_t dev = sc->sc_dev;
   10485 	struct mii_data *mii = &sc->sc_mii;
   10486 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10487 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10488 	mii_readreg_t new_readreg;
   10489 	mii_writereg_t new_writereg;
   10490 	bool dodiag = true;
   10491 
   10492 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   10493 		device_xname(sc->sc_dev), __func__));
   10494 
   10495 	/*
   10496 	 * A 1000BASE-T SFP uses SGMII, so the first assumed PHY type is
   10497 	 * always incorrect. Don't print diag output on the second call.
   10498 	 */
   10499 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10500 		dodiag = false;
   10501 
   10502 	if (mii->mii_readreg == NULL) {
   10503 		/*
   10504 		 *  This is the first call of this function. For ICH and PCH
   10505 		 * variants, it's difficult to determine the PHY access method
   10506 		 * by sc_type, so use the PCI product ID for some devices.
   10507 		 */
   10508 
   10509 		switch (sc->sc_pcidevid) {
   10510 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10511 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10512 			/* 82577 */
   10513 			new_phytype = WMPHY_82577;
   10514 			break;
   10515 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10516 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10517 			/* 82578 */
   10518 			new_phytype = WMPHY_82578;
   10519 			break;
   10520 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10521 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10522 			/* 82579 */
   10523 			new_phytype = WMPHY_82579;
   10524 			break;
   10525 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10526 		case PCI_PRODUCT_INTEL_82801I_BM:
   10527 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10528 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10529 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10530 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10531 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10532 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10533 			/* ICH8, 9, 10 with 82567 */
   10534 			new_phytype = WMPHY_BM;
   10535 			break;
   10536 		default:
   10537 			break;
   10538 		}
   10539 	} else {
   10540 		/* It's not the first call. Use PHY OUI and model */
   10541 		switch (phy_oui) {
   10542 		case MII_OUI_ATTANSIC: /* atphy(4) */
   10543 			switch (phy_model) {
   10544 			case MII_MODEL_ATTANSIC_AR8021:
   10545 				new_phytype = WMPHY_82578;
   10546 				break;
   10547 			default:
   10548 				break;
   10549 			}
   10550 			break;
   10551 		case MII_OUI_xxMARVELL:
   10552 			switch (phy_model) {
   10553 			case MII_MODEL_xxMARVELL_I210:
   10554 				new_phytype = WMPHY_I210;
   10555 				break;
   10556 			case MII_MODEL_xxMARVELL_E1011:
   10557 			case MII_MODEL_xxMARVELL_E1000_3:
   10558 			case MII_MODEL_xxMARVELL_E1000_5:
   10559 			case MII_MODEL_xxMARVELL_E1112:
   10560 				new_phytype = WMPHY_M88;
   10561 				break;
   10562 			case MII_MODEL_xxMARVELL_E1149:
   10563 				new_phytype = WMPHY_BM;
   10564 				break;
   10565 			case MII_MODEL_xxMARVELL_E1111:
   10566 			case MII_MODEL_xxMARVELL_I347:
   10567 			case MII_MODEL_xxMARVELL_E1512:
   10568 			case MII_MODEL_xxMARVELL_E1340M:
   10569 			case MII_MODEL_xxMARVELL_E1543:
   10570 				new_phytype = WMPHY_M88;
   10571 				break;
   10572 			case MII_MODEL_xxMARVELL_I82563:
   10573 				new_phytype = WMPHY_GG82563;
   10574 				break;
   10575 			default:
   10576 				break;
   10577 			}
   10578 			break;
   10579 		case MII_OUI_INTEL:
   10580 			switch (phy_model) {
   10581 			case MII_MODEL_INTEL_I82577:
   10582 				new_phytype = WMPHY_82577;
   10583 				break;
   10584 			case MII_MODEL_INTEL_I82579:
   10585 				new_phytype = WMPHY_82579;
   10586 				break;
   10587 			case MII_MODEL_INTEL_I217:
   10588 				new_phytype = WMPHY_I217;
   10589 				break;
   10590 			case MII_MODEL_INTEL_I82580:
   10591 				new_phytype = WMPHY_82580;
   10592 				break;
   10593 			case MII_MODEL_INTEL_I350:
   10594 				new_phytype = WMPHY_I350;
   10595 				break;
   10596 			default:
   10597 				break;
   10598 			}
   10599 			break;
   10600 		case MII_OUI_yyINTEL:
   10601 			switch (phy_model) {
   10602 			case MII_MODEL_yyINTEL_I82562G:
   10603 			case MII_MODEL_yyINTEL_I82562EM:
   10604 			case MII_MODEL_yyINTEL_I82562ET:
   10605 				new_phytype = WMPHY_IFE;
   10606 				break;
   10607 			case MII_MODEL_yyINTEL_IGP01E1000:
   10608 				new_phytype = WMPHY_IGP;
   10609 				break;
   10610 			case MII_MODEL_yyINTEL_I82566:
   10611 				new_phytype = WMPHY_IGP_3;
   10612 				break;
   10613 			default:
   10614 				break;
   10615 			}
   10616 			break;
   10617 		default:
   10618 			break;
   10619 		}
   10620 
   10621 		if (dodiag) {
   10622 			if (new_phytype == WMPHY_UNKNOWN)
   10623 				aprint_verbose_dev(dev,
   10624 				    "%s: Unknown PHY model. OUI=%06x, "
   10625 				    "model=%04x\n", __func__, phy_oui,
   10626 				    phy_model);
   10627 
   10628 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10629 			    && (sc->sc_phytype != new_phytype)) {
   10630 				aprint_error_dev(dev, "Previously assumed PHY "
   10631 				    "type (%u) was incorrect. PHY type from "
   10632 				    "PHY ID = %u\n", sc->sc_phytype, new_phytype);
   10633 			}
   10634 		}
   10635 	}
   10636 
   10637 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10638 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10639 		/* SGMII */
   10640 		new_readreg = wm_sgmii_readreg;
   10641 		new_writereg = wm_sgmii_writereg;
   10642 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10643 		/* BM2 (phyaddr == 1) */
   10644 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10645 		    && (new_phytype != WMPHY_BM)
   10646 		    && (new_phytype != WMPHY_UNKNOWN))
   10647 			doubt_phytype = new_phytype;
   10648 		new_phytype = WMPHY_BM;
   10649 		new_readreg = wm_gmii_bm_readreg;
   10650 		new_writereg = wm_gmii_bm_writereg;
   10651 	} else if (sc->sc_type >= WM_T_PCH) {
   10652 		/* All PCH* use _hv_ */
   10653 		new_readreg = wm_gmii_hv_readreg;
   10654 		new_writereg = wm_gmii_hv_writereg;
   10655 	} else if (sc->sc_type >= WM_T_ICH8) {
   10656 		/* non-82567 ICH8, 9 and 10 */
   10657 		new_readreg = wm_gmii_i82544_readreg;
   10658 		new_writereg = wm_gmii_i82544_writereg;
   10659 	} else if (sc->sc_type >= WM_T_80003) {
   10660 		/* 80003 */
   10661 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10662 		    && (new_phytype != WMPHY_GG82563)
   10663 		    && (new_phytype != WMPHY_UNKNOWN))
   10664 			doubt_phytype = new_phytype;
   10665 		new_phytype = WMPHY_GG82563;
   10666 		new_readreg = wm_gmii_i80003_readreg;
   10667 		new_writereg = wm_gmii_i80003_writereg;
   10668 	} else if (sc->sc_type >= WM_T_I210) {
   10669 		/* I210 and I211 */
   10670 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10671 		    && (new_phytype != WMPHY_I210)
   10672 		    && (new_phytype != WMPHY_UNKNOWN))
   10673 			doubt_phytype = new_phytype;
   10674 		new_phytype = WMPHY_I210;
   10675 		new_readreg = wm_gmii_gs40g_readreg;
   10676 		new_writereg = wm_gmii_gs40g_writereg;
   10677 	} else if (sc->sc_type >= WM_T_82580) {
   10678 		/* 82580, I350 and I354 */
   10679 		new_readreg = wm_gmii_82580_readreg;
   10680 		new_writereg = wm_gmii_82580_writereg;
   10681 	} else if (sc->sc_type >= WM_T_82544) {
   10682 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10683 		new_readreg = wm_gmii_i82544_readreg;
   10684 		new_writereg = wm_gmii_i82544_writereg;
   10685 	} else {
   10686 		new_readreg = wm_gmii_i82543_readreg;
   10687 		new_writereg = wm_gmii_i82543_writereg;
   10688 	}
   10689 
   10690 	if (new_phytype == WMPHY_BM) {
   10691 		/* All BM use _bm_ */
   10692 		new_readreg = wm_gmii_bm_readreg;
   10693 		new_writereg = wm_gmii_bm_writereg;
   10694 	}
   10695 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10696 		/* All PCH* use _hv_ */
   10697 		new_readreg = wm_gmii_hv_readreg;
   10698 		new_writereg = wm_gmii_hv_writereg;
   10699 	}
   10700 
   10701 	/* Diag output */
   10702 	if (dodiag) {
   10703 		if (doubt_phytype != WMPHY_UNKNOWN)
   10704 			aprint_error_dev(dev, "Assumed new PHY type was "
   10705 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10706 			    new_phytype);
   10707 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10708 		    && (sc->sc_phytype != new_phytype))
   10709 			aprint_error_dev(dev, "Previously assumed PHY type "
   10710 			    "(%u) was incorrect. New PHY type = %u\n",
   10711 			    sc->sc_phytype, new_phytype);
   10712 
   10713 		if ((mii->mii_readreg != NULL) &&
   10714 		    (new_phytype == WMPHY_UNKNOWN))
   10715 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10716 
   10717 		if ((mii->mii_readreg != NULL) &&
   10718 		    (mii->mii_readreg != new_readreg))
   10719 			aprint_error_dev(dev, "Previously assumed PHY "
   10720 			    "read/write function was incorrect.\n");
   10721 	}
   10722 
   10723 	/* Update now */
   10724 	sc->sc_phytype = new_phytype;
   10725 	mii->mii_readreg = new_readreg;
   10726 	mii->mii_writereg = new_writereg;
   10727 	if (new_readreg == wm_gmii_hv_readreg) {
   10728 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10729 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10730 	} else if (new_readreg == wm_sgmii_readreg) {
   10731 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10732 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10733 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10734 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10735 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10736 	}
   10737 }
   10738 
   10739 /*
   10740  * wm_get_phy_id_82575:
   10741  *
   10742  * Return PHY ID. Return -1 if it failed.
   10743  */
   10744 static int
   10745 wm_get_phy_id_82575(struct wm_softc *sc)
   10746 {
   10747 	uint32_t reg;
   10748 	int phyid = -1;
   10749 
   10750 	/* XXX */
   10751 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10752 		return -1;
   10753 
   10754 	if (wm_sgmii_uses_mdio(sc)) {
   10755 		switch (sc->sc_type) {
   10756 		case WM_T_82575:
   10757 		case WM_T_82576:
   10758 			reg = CSR_READ(sc, WMREG_MDIC);
   10759 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10760 			break;
   10761 		case WM_T_82580:
   10762 		case WM_T_I350:
   10763 		case WM_T_I354:
   10764 		case WM_T_I210:
   10765 		case WM_T_I211:
   10766 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10767 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10768 			break;
   10769 		default:
   10770 			return -1;
   10771 		}
   10772 	}
   10773 
   10774 	return phyid;
   10775 }
   10776 
   10777 /*
   10778  * wm_gmii_mediainit:
   10779  *
   10780  *	Initialize media for use on 1000BASE-T devices.
   10781  */
   10782 static void
   10783 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10784 {
   10785 	device_t dev = sc->sc_dev;
   10786 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10787 	struct mii_data *mii = &sc->sc_mii;
   10788 
   10789 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10790 		device_xname(sc->sc_dev), __func__));
   10791 
   10792 	/* We have GMII. */
   10793 	sc->sc_flags |= WM_F_HAS_MII;
   10794 
   10795 	if (sc->sc_type == WM_T_80003)
   10796 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10797 	else
   10798 		sc->sc_tipg = TIPG_1000T_DFLT;
   10799 
   10800 	/*
   10801 	 * Let the chip set speed/duplex on its own based on
   10802 	 * signals from the PHY.
   10803 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10804 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10805 	 */
   10806 	sc->sc_ctrl |= CTRL_SLU;
   10807 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10808 
   10809 	/* Initialize our media structures and probe the GMII. */
   10810 	mii->mii_ifp = ifp;
   10811 
   10812 	mii->mii_statchg = wm_gmii_statchg;
   10813 
   10814 	/* get PHY control from SMBus to PCIe */
   10815 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10816 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10817 	    || (sc->sc_type == WM_T_PCH_CNP))
   10818 		wm_init_phy_workarounds_pchlan(sc);
   10819 
   10820 	wm_gmii_reset(sc);
   10821 
   10822 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10823 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10824 	    wm_gmii_mediastatus, sc->sc_core_lock);
   10825 
   10826 	/* Setup internal SGMII PHY for SFP */
   10827 	wm_sgmii_sfp_preconfig(sc);
   10828 
   10829 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10830 	    || (sc->sc_type == WM_T_82580)
   10831 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10832 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10833 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10834 			/* Attach only one port */
   10835 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10836 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10837 		} else {
   10838 			int i, id;
   10839 			uint32_t ctrl_ext;
   10840 
   10841 			id = wm_get_phy_id_82575(sc);
   10842 			if (id != -1) {
   10843 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10844 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10845 			}
   10846 			if ((id == -1)
   10847 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10848 				/* Power on the SGMII PHY if it is disabled */
   10849 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10850 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10851 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10852 				CSR_WRITE_FLUSH(sc);
   10853 				delay(300*1000); /* XXX too long */
   10854 
   10855 				/*
   10856 				 * Scan PHY addresses 1 through 7.
   10857 				 *
   10858 				 * I2C access can fail with the I2C
   10859 				 * register's ERROR bit set, so suppress
   10860 				 * error messages while scanning.
   10861 				 */
   10862 				sc->phy.no_errprint = true;
   10863 				for (i = 1; i < 8; i++)
   10864 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10865 					    0xffffffff, i, MII_OFFSET_ANY,
   10866 					    MIIF_DOPAUSE);
   10867 				sc->phy.no_errprint = false;
   10868 
   10869 				/* Restore previous sfp cage power state */
   10870 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10871 			}
   10872 		}
   10873 	} else
   10874 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10875 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10876 
   10877 	/*
   10878 	 * If the MAC is PCH2 or newer and we failed to detect the MII
   10879 	 * PHY, call wm_set_mdio_slow_mode_hv() as a workaround and retry.
   10880 	 */
   10881 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10882 		|| (sc->sc_type == WM_T_PCH_SPT)
   10883 		|| (sc->sc_type == WM_T_PCH_CNP))
   10884 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10885 		wm_set_mdio_slow_mode_hv(sc);
   10886 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10887 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10888 	}
   10889 
   10890 	/*
   10891 	 * (For ICH8 variants)
   10892 	 * If PHY detection failed, use BM's r/w function and retry.
   10893 	 */
   10894 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10895 		/* if failed, retry with *_bm_* */
   10896 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10897 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10898 		    sc->sc_phytype);
   10899 		sc->sc_phytype = WMPHY_BM;
   10900 		mii->mii_readreg = wm_gmii_bm_readreg;
   10901 		mii->mii_writereg = wm_gmii_bm_writereg;
   10902 
   10903 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10904 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10905 	}
   10906 
   10907 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10908 		/* No PHY was found */
   10909 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10910 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10911 		sc->sc_phytype = WMPHY_NONE;
   10912 	} else {
   10913 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10914 
   10915 		/*
   10916 		 * PHY found! Check PHY type again by the second call of
   10917 		 * wm_gmii_setup_phytype.
   10918 		 */
   10919 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10920 		    child->mii_mpd_model);
   10921 
   10922 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10923 	}
   10924 }
   10925 
   10926 /*
   10927  * wm_gmii_mediachange:	[ifmedia interface function]
   10928  *
   10929  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10930  */
   10931 static int
   10932 wm_gmii_mediachange(struct ifnet *ifp)
   10933 {
   10934 	struct wm_softc *sc = ifp->if_softc;
   10935 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10936 	uint32_t reg;
   10937 	int rc;
   10938 
   10939 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   10940 		device_xname(sc->sc_dev), __func__));
   10941 	if ((ifp->if_flags & IFF_UP) == 0)
   10942 		return 0;
   10943 
   10944 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10945 	if ((sc->sc_type == WM_T_82580)
   10946 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10947 	    || (sc->sc_type == WM_T_I211)) {
   10948 		reg = CSR_READ(sc, WMREG_PHPM);
   10949 		reg &= ~PHPM_GO_LINK_D;
   10950 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10951 	}
   10952 
   10953 	/* Disable D0 LPLU. */
   10954 	wm_lplu_d0_disable(sc);
   10955 
   10956 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10957 	sc->sc_ctrl |= CTRL_SLU;
   10958 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10959 	    || (sc->sc_type > WM_T_82543)) {
   10960 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10961 	} else {
   10962 		sc->sc_ctrl &= ~CTRL_ASDE;
   10963 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10964 		if (ife->ifm_media & IFM_FDX)
   10965 			sc->sc_ctrl |= CTRL_FD;
   10966 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10967 		case IFM_10_T:
   10968 			sc->sc_ctrl |= CTRL_SPEED_10;
   10969 			break;
   10970 		case IFM_100_TX:
   10971 			sc->sc_ctrl |= CTRL_SPEED_100;
   10972 			break;
   10973 		case IFM_1000_T:
   10974 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10975 			break;
   10976 		case IFM_NONE:
   10977 			/* There is no specific setting for IFM_NONE */
   10978 			break;
   10979 		default:
   10980 			panic("wm_gmii_mediachange: bad media 0x%x",
   10981 			    ife->ifm_media);
   10982 		}
   10983 	}
   10984 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10985 	CSR_WRITE_FLUSH(sc);
   10986 
   10987 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10988 		wm_serdes_mediachange(ifp);
   10989 
   10990 	if (sc->sc_type <= WM_T_82543)
   10991 		wm_gmii_reset(sc);
   10992 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10993 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
   10994 		/* Allow time for the SFP cage to power up the PHY */
   10995 		delay(300 * 1000);
   10996 		wm_gmii_reset(sc);
   10997 	}
   10998 
   10999 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   11000 		return 0;
   11001 	return rc;
   11002 }
   11003 
   11004 /*
   11005  * wm_gmii_mediastatus:	[ifmedia interface function]
   11006  *
   11007  *	Get the current interface media status on a 1000BASE-T device.
   11008  */
   11009 static void
   11010 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11011 {
   11012 	struct wm_softc *sc = ifp->if_softc;
   11013 
   11014 	ether_mediastatus(ifp, ifmr);
   11015 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11016 	    | sc->sc_flowflags;
   11017 }
   11018 
   11019 #define	MDI_IO		CTRL_SWDPIN(2)
   11020 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   11021 #define	MDI_CLK		CTRL_SWDPIN(3)
   11022 
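        /*
         * wm_i82543_mii_sendbits:
         *
         *	Clock the low NBITS bits of DATA out to the PHY, MSB first,
         *	by bit-banging the MDIO data and clock lines through the
         *	software-controllable CTRL pins (the 82543 has no usable
         *	MDIC register, so MII management frames are done in software).
         */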
   11023 static void
   11024 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   11025 {
   11026 	uint32_t i, v;
   11027 
   11028 	v = CSR_READ(sc, WMREG_CTRL);
   11029 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11030 	v |= MDI_DIR | CTRL_SWDPIO(3);
   11031 
   11032 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   11033 		if (data & i)
   11034 			v |= MDI_IO;
   11035 		else
   11036 			v &= ~MDI_IO;
   11037 		CSR_WRITE(sc, WMREG_CTRL, v);
   11038 		CSR_WRITE_FLUSH(sc);
   11039 		delay(10);
   11040 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11041 		CSR_WRITE_FLUSH(sc);
   11042 		delay(10);
   11043 		CSR_WRITE(sc, WMREG_CTRL, v);
   11044 		CSR_WRITE_FLUSH(sc);
   11045 		delay(10);
   11046 	}
   11047 }
   11048 
   11049 static uint16_t
   11050 wm_i82543_mii_recvbits(struct wm_softc *sc)
   11051 {
   11052 	uint32_t v, i;
   11053 	uint16_t data = 0;
   11054 
   11055 	v = CSR_READ(sc, WMREG_CTRL);
   11056 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   11057 	v |= CTRL_SWDPIO(3);
   11058 
   11059 	CSR_WRITE(sc, WMREG_CTRL, v);
   11060 	CSR_WRITE_FLUSH(sc);
   11061 	delay(10);
   11062 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11063 	CSR_WRITE_FLUSH(sc);
   11064 	delay(10);
   11065 	CSR_WRITE(sc, WMREG_CTRL, v);
   11066 	CSR_WRITE_FLUSH(sc);
   11067 	delay(10);
   11068 
   11069 	for (i = 0; i < 16; i++) {
   11070 		data <<= 1;
   11071 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11072 		CSR_WRITE_FLUSH(sc);
   11073 		delay(10);
   11074 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   11075 			data |= 1;
   11076 		CSR_WRITE(sc, WMREG_CTRL, v);
   11077 		CSR_WRITE_FLUSH(sc);
   11078 		delay(10);
   11079 	}
   11080 
   11081 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   11082 	CSR_WRITE_FLUSH(sc);
   11083 	delay(10);
   11084 	CSR_WRITE(sc, WMREG_CTRL, v);
   11085 	CSR_WRITE_FLUSH(sc);
   11086 	delay(10);
   11087 
   11088 	return data;
   11089 }
   11090 
   11091 #undef MDI_IO
   11092 #undef MDI_DIR
   11093 #undef MDI_CLK
   11094 
   11095 /*
   11096  * wm_gmii_i82543_readreg:	[mii interface function]
   11097  *
   11098  *	Read a PHY register on the GMII (i82543 version).
   11099  */
   11100 static int
   11101 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11102 {
   11103 	struct wm_softc *sc = device_private(dev);
   11104 
   11105 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11106 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   11107 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   11108 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   11109 
   11110 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   11111 		device_xname(dev), phy, reg, *val));
   11112 
   11113 	return 0;
   11114 }
   11115 
   11116 /*
   11117  * wm_gmii_i82543_writereg:	[mii interface function]
   11118  *
   11119  *	Write a PHY register on the GMII (i82543 version).
   11120  */
   11121 static int
   11122 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   11123 {
   11124 	struct wm_softc *sc = device_private(dev);
   11125 
   11126 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   11127 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   11128 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   11129 	    (MII_COMMAND_START << 30), 32);
   11130 
   11131 	return 0;
   11132 }
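
          /*
           * Illustrative sketch (example only, kept out of the build with
           * #if 0): the IEEE 802.3 clause 22 management frame layout that
           * the two bit-bang functions above shift out MSB first.  The
           * helper name is hypothetical; it packs a read command exactly
           * the way wm_gmii_i82543_readreg() does.
           */
          #if 0
          static inline uint32_t
          example_mii_read_frame(int phy, int reg)
          {

          	/* <01 start><10 read opcode><5-bit phy><5-bit reg>: 14 bits */
          	return (MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
          	    (phy << 5) | reg;
          }
          #endif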
   11133 
   11134 /*
   11135  * wm_gmii_mdic_readreg:	[mii interface function]
   11136  *
   11137  *	Read a PHY register on the GMII.
   11138  */
   11139 static int
   11140 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11141 {
   11142 	struct wm_softc *sc = device_private(dev);
   11143 	uint32_t mdic = 0;
   11144 	int i;
   11145 
   11146 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11147 	    && (reg > MII_ADDRMASK)) {
   11148 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11149 		    __func__, sc->sc_phytype, reg);
   11150 		reg &= MII_ADDRMASK;
   11151 	}
   11152 
   11153 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   11154 	    MDIC_REGADD(reg));
   11155 
   11156 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11157 		delay(50);
   11158 		mdic = CSR_READ(sc, WMREG_MDIC);
   11159 		if (mdic & MDIC_READY)
   11160 			break;
   11161 	}
   11162 
   11163 	if ((mdic & MDIC_READY) == 0) {
   11164 		DPRINTF(sc, WM_DEBUG_GMII,
   11165 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   11166 			device_xname(dev), phy, reg));
   11167 		return ETIMEDOUT;
   11168 	} else if (mdic & MDIC_E) {
   11169 		/* This is normal if no PHY is present. */
   11170 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
    11171 			device_xname(dev), phy, reg));
   11172 		return -1;
   11173 	} else
   11174 		*val = MDIC_DATA(mdic);
   11175 
   11176 	/*
   11177 	 * Allow some time after each MDIC transaction to avoid
   11178 	 * reading duplicate data in the next MDIC transaction.
   11179 	 */
   11180 	if (sc->sc_type == WM_T_PCH2)
   11181 		delay(100);
   11182 
   11183 	return 0;
   11184 }
   11185 
   11186 /*
   11187  * wm_gmii_mdic_writereg:	[mii interface function]
   11188  *
   11189  *	Write a PHY register on the GMII.
   11190  */
   11191 static int
   11192 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   11193 {
   11194 	struct wm_softc *sc = device_private(dev);
   11195 	uint32_t mdic = 0;
   11196 	int i;
   11197 
   11198 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   11199 	    && (reg > MII_ADDRMASK)) {
   11200 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11201 		    __func__, sc->sc_phytype, reg);
   11202 		reg &= MII_ADDRMASK;
   11203 	}
   11204 
   11205 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   11206 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   11207 
   11208 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   11209 		delay(50);
   11210 		mdic = CSR_READ(sc, WMREG_MDIC);
   11211 		if (mdic & MDIC_READY)
   11212 			break;
   11213 	}
   11214 
   11215 	if ((mdic & MDIC_READY) == 0) {
   11216 		DPRINTF(sc, WM_DEBUG_GMII,
   11217 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   11218 			device_xname(dev), phy, reg));
   11219 		return ETIMEDOUT;
   11220 	} else if (mdic & MDIC_E) {
   11221 		DPRINTF(sc, WM_DEBUG_GMII,
   11222 		    ("%s: MDIC write error: phy %d reg %d\n",
   11223 			device_xname(dev), phy, reg));
   11224 		return -1;
   11225 	}
   11226 
   11227 	/*
   11228 	 * Allow some time after each MDIC transaction to avoid
   11229 	 * reading duplicate data in the next MDIC transaction.
   11230 	 */
   11231 	if (sc->sc_type == WM_T_PCH2)
   11232 		delay(100);
   11233 
   11234 	return 0;
   11235 }
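
          /*
           * Sketch (example only): the MDIC ready-poll used by both
           * functions above, factored into a stand-alone helper.  The name
           * is hypothetical; the loop bound and the 50us step match the
           * code above, for a worst case of WM_GEN_POLL_TIMEOUT * 3 * 50us.
           */
          #if 0
          static int
          example_mdic_wait_ready(struct wm_softc *sc, uint32_t *mdicp)
          {
          	uint32_t mdic = 0;
          	int i;

          	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
          		delay(50);
          		mdic = CSR_READ(sc, WMREG_MDIC);
          		if (mdic & MDIC_READY)
          			break;
          	}
          	*mdicp = mdic;
          	return ((mdic & MDIC_READY) != 0) ? 0 : ETIMEDOUT;
          }
          #endif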
   11236 
   11237 /*
   11238  * wm_gmii_i82544_readreg:	[mii interface function]
   11239  *
   11240  *	Read a PHY register on the GMII.
   11241  */
   11242 static int
   11243 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11244 {
   11245 	struct wm_softc *sc = device_private(dev);
   11246 	int rv;
   11247 
   11248 	if (sc->phy.acquire(sc)) {
   11249 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11250 		return -1;
   11251 	}
   11252 
   11253 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   11254 
   11255 	sc->phy.release(sc);
   11256 
   11257 	return rv;
   11258 }
   11259 
   11260 static int
   11261 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11262 {
   11263 	struct wm_softc *sc = device_private(dev);
   11264 	int rv;
   11265 
   11266 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11267 		switch (sc->sc_phytype) {
   11268 		case WMPHY_IGP:
   11269 		case WMPHY_IGP_2:
   11270 		case WMPHY_IGP_3:
   11271 			rv = wm_gmii_mdic_writereg(dev, phy,
   11272 			    IGPHY_PAGE_SELECT, reg);
   11273 			if (rv != 0)
   11274 				return rv;
   11275 			break;
   11276 		default:
   11277 #ifdef WM_DEBUG
   11278 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11279 			    __func__, sc->sc_phytype, reg);
   11280 #endif
   11281 			break;
   11282 		}
   11283 	}
   11284 
   11285 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11286 }
   11287 
   11288 /*
   11289  * wm_gmii_i82544_writereg:	[mii interface function]
   11290  *
   11291  *	Write a PHY register on the GMII.
   11292  */
   11293 static int
   11294 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   11295 {
   11296 	struct wm_softc *sc = device_private(dev);
   11297 	int rv;
   11298 
   11299 	if (sc->phy.acquire(sc)) {
   11300 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11301 		return -1;
   11302 	}
   11303 
   11304 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   11305 	sc->phy.release(sc);
   11306 
   11307 	return rv;
   11308 }
   11309 
   11310 static int
   11311 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11312 {
   11313 	struct wm_softc *sc = device_private(dev);
   11314 	int rv;
   11315 
   11316 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11317 		switch (sc->sc_phytype) {
   11318 		case WMPHY_IGP:
   11319 		case WMPHY_IGP_2:
   11320 		case WMPHY_IGP_3:
   11321 			rv = wm_gmii_mdic_writereg(dev, phy,
   11322 			    IGPHY_PAGE_SELECT, reg);
   11323 			if (rv != 0)
   11324 				return rv;
   11325 			break;
   11326 		default:
   11327 #ifdef WM_DEBUG
    11328 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   11329 			    __func__, sc->sc_phytype, reg);
   11330 #endif
   11331 			break;
   11332 		}
   11333 	}
   11334 
   11335 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11336 }
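
          /*
           * Example only: how a multi-page IGP register number flows
           * through the two *_locked functions above.  "dev", "phy" and
           * "reg" are hypothetical; any reg above
           * BME1000_MAX_MULTI_PAGE_REG takes the page-select path, and
           * only the low five bits (MII_ADDRMASK) reach the MDIC
           * transaction.
           */
          #if 0
          	uint16_t val;

          	/* 1) Write the full value to the page select register. */
          	wm_gmii_mdic_writereg(dev, phy, IGPHY_PAGE_SELECT, reg);
          	/* 2) Access the register within the selected page. */
          	wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, &val);
          #endif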
   11337 
   11338 /*
   11339  * wm_gmii_i80003_readreg:	[mii interface function]
   11340  *
    11341  *	Read a PHY register on the Kumeran interface (80003).
   11342  * This could be handled by the PHY layer if we didn't have to lock the
   11343  * resource ...
   11344  */
   11345 static int
   11346 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11347 {
   11348 	struct wm_softc *sc = device_private(dev);
   11349 	int page_select;
   11350 	uint16_t temp, temp2;
   11351 	int rv = 0;
   11352 
   11353 	if (phy != 1) /* Only one PHY on kumeran bus */
   11354 		return -1;
   11355 
   11356 	if (sc->phy.acquire(sc)) {
   11357 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11358 		return -1;
   11359 	}
   11360 
   11361 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11362 		page_select = GG82563_PHY_PAGE_SELECT;
   11363 	else {
   11364 		/*
   11365 		 * Use Alternative Page Select register to access registers
   11366 		 * 30 and 31.
   11367 		 */
   11368 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11369 	}
   11370 	temp = reg >> GG82563_PAGE_SHIFT;
   11371 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11372 		goto out;
   11373 
   11374 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11375 		/*
    11376 		 * Wait an additional 200us to work around a bug with the
    11377 		 * ready bit in the MDIC register.
   11378 		 */
   11379 		delay(200);
   11380 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11381 		if ((rv != 0) || (temp2 != temp)) {
   11382 			device_printf(dev, "%s failed\n", __func__);
   11383 			rv = -1;
   11384 			goto out;
   11385 		}
   11386 		delay(200);
   11387 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11388 		delay(200);
   11389 	} else
   11390 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11391 
   11392 out:
   11393 	sc->phy.release(sc);
   11394 	return rv;
   11395 }
   11396 
   11397 /*
   11398  * wm_gmii_i80003_writereg:	[mii interface function]
   11399  *
    11400  *	Write a PHY register on the Kumeran interface (80003).
   11401  * This could be handled by the PHY layer if we didn't have to lock the
   11402  * resource ...
   11403  */
   11404 static int
   11405 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11406 {
   11407 	struct wm_softc *sc = device_private(dev);
   11408 	int page_select, rv;
   11409 	uint16_t temp, temp2;
   11410 
   11411 	if (phy != 1) /* Only one PHY on kumeran bus */
   11412 		return -1;
   11413 
   11414 	if (sc->phy.acquire(sc)) {
   11415 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11416 		return -1;
   11417 	}
   11418 
   11419 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11420 		page_select = GG82563_PHY_PAGE_SELECT;
   11421 	else {
   11422 		/*
   11423 		 * Use Alternative Page Select register to access registers
   11424 		 * 30 and 31.
   11425 		 */
   11426 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11427 	}
   11428 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11429 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11430 		goto out;
   11431 
   11432 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11433 		/*
    11434 		 * Wait an additional 200us to work around a bug with the
    11435 		 * ready bit in the MDIC register.
   11436 		 */
   11437 		delay(200);
   11438 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11439 		if ((rv != 0) || (temp2 != temp)) {
   11440 			device_printf(dev, "%s failed\n", __func__);
   11441 			rv = -1;
   11442 			goto out;
   11443 		}
   11444 		delay(200);
   11445 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11446 		delay(200);
   11447 	} else
   11448 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11449 
   11450 out:
   11451 	sc->phy.release(sc);
   11452 	return rv;
   11453 }
   11454 
   11455 /*
   11456  * wm_gmii_bm_readreg:	[mii interface function]
   11457  *
    11458  *	Read a PHY register on a BM PHY.
   11459  * This could be handled by the PHY layer if we didn't have to lock the
   11460  * resource ...
   11461  */
   11462 static int
   11463 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11464 {
   11465 	struct wm_softc *sc = device_private(dev);
   11466 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11467 	int rv;
   11468 
   11469 	if (sc->phy.acquire(sc)) {
   11470 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11471 		return -1;
   11472 	}
   11473 
   11474 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11475 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11476 		    || (reg == 31)) ? 1 : phy;
   11477 	/* Page 800 works differently than the rest so it has its own func */
   11478 	if (page == BM_WUC_PAGE) {
   11479 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11480 		goto release;
   11481 	}
   11482 
   11483 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11484 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11485 		    && (sc->sc_type != WM_T_82583))
   11486 			rv = wm_gmii_mdic_writereg(dev, phy,
   11487 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11488 		else
   11489 			rv = wm_gmii_mdic_writereg(dev, phy,
   11490 			    BME1000_PHY_PAGE_SELECT, page);
   11491 		if (rv != 0)
   11492 			goto release;
   11493 	}
   11494 
   11495 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11496 
   11497 release:
   11498 	sc->phy.release(sc);
   11499 	return rv;
   11500 }
   11501 
   11502 /*
   11503  * wm_gmii_bm_writereg:	[mii interface function]
   11504  *
    11505  *	Write a PHY register on a BM PHY.
   11506  * This could be handled by the PHY layer if we didn't have to lock the
   11507  * resource ...
   11508  */
   11509 static int
   11510 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11511 {
   11512 	struct wm_softc *sc = device_private(dev);
   11513 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11514 	int rv;
   11515 
   11516 	if (sc->phy.acquire(sc)) {
   11517 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11518 		return -1;
   11519 	}
   11520 
   11521 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11522 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11523 		    || (reg == 31)) ? 1 : phy;
   11524 	/* Page 800 works differently than the rest so it has its own func */
   11525 	if (page == BM_WUC_PAGE) {
   11526 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11527 		goto release;
   11528 	}
   11529 
   11530 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11531 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11532 		    && (sc->sc_type != WM_T_82583))
   11533 			rv = wm_gmii_mdic_writereg(dev, phy,
   11534 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11535 		else
   11536 			rv = wm_gmii_mdic_writereg(dev, phy,
   11537 			    BME1000_PHY_PAGE_SELECT, page);
   11538 		if (rv != 0)
   11539 			goto release;
   11540 	}
   11541 
   11542 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11543 
   11544 release:
   11545 	sc->phy.release(sc);
   11546 	return rv;
   11547 }
   11548 
   11549 /*
   11550  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
    11551  *  @dev: device handle
    11552  *  @phy_regp: pointer to store the original contents of BM_WUC_ENABLE_REG
    11553  *
    11554  *  Assumes the semaphore is already acquired and that phy_regp points to
    11555  *  valid memory in which to store the contents of BM_WUC_ENABLE_REG.
   11556  */
   11557 static int
   11558 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11559 {
   11560 #ifdef WM_DEBUG
   11561 	struct wm_softc *sc = device_private(dev);
   11562 #endif
   11563 	uint16_t temp;
   11564 	int rv;
   11565 
   11566 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11567 		device_xname(dev), __func__));
   11568 
   11569 	if (!phy_regp)
   11570 		return -1;
   11571 
   11572 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11573 
   11574 	/* Select Port Control Registers page */
   11575 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11576 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11577 	if (rv != 0)
   11578 		return rv;
   11579 
   11580 	/* Read WUCE and save it */
   11581 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11582 	if (rv != 0)
   11583 		return rv;
   11584 
   11585 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11586 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11587 	 */
   11588 	temp = *phy_regp;
   11589 	temp |= BM_WUC_ENABLE_BIT;
   11590 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11591 
   11592 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11593 		return rv;
   11594 
    11595 	/* Select Host Wakeup Registers page; the caller is now able to
    11596 	 * write registers on the Wakeup registers page.
    11597 	 */
   11598 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11599 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11600 }
   11601 
   11602 /*
   11603  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
    11604  *  @dev: device handle
    11605  *  @phy_regp: pointer to the original contents of BM_WUC_ENABLE_REG
    11606  *
    11607  *  Restore BM_WUC_ENABLE_REG to its original value.
    11608  *
    11609  *  Assumes the semaphore is already acquired and that *phy_regp holds the
    11610  *  contents of BM_WUC_ENABLE_REG from before the register(s) on
    11611  *  BM_WUC_PAGE were accessed by the caller.
   11612  */
   11613 static int
   11614 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11615 {
   11616 #ifdef WM_DEBUG
   11617 	struct wm_softc *sc = device_private(dev);
   11618 #endif
   11619 
   11620 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   11621 		device_xname(dev), __func__));
   11622 
   11623 	if (!phy_regp)
   11624 		return -1;
   11625 
   11626 	/* Select Port Control Registers page */
   11627 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11628 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11629 
   11630 	/* Restore 769.17 to its original value */
   11631 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11632 
   11633 	return 0;
   11634 }
   11635 
   11636 /*
   11637  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    11638  *  @dev: device handle
   11639  *  @offset: register offset to be read or written
   11640  *  @val: pointer to the data to read or write
   11641  *  @rd: determines if operation is read or write
   11642  *  @page_set: BM_WUC_PAGE already set and access enabled
   11643  *
   11644  *  Read the PHY register at offset and store the retrieved information in
   11645  *  data, or write data to PHY register at offset.  Note the procedure to
   11646  *  access the PHY wakeup registers is different than reading the other PHY
   11647  *  registers. It works as such:
   11648  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11649  *  2) Set page to 800 for host access (801 for manageability firmware)
   11650  *  3) Write the address using the address opcode (0x11)
   11651  *  4) Read or write the data using the data opcode (0x12)
   11652  *  5) Restore 769.17.2 to its original value
   11653  *
   11654  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11655  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11656  *
   11657  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11658  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   11659  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   11660  */
   11661 static int
    11662 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11663 	bool page_set)
   11664 {
   11665 	struct wm_softc *sc = device_private(dev);
   11666 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11667 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11668 	uint16_t wuce;
   11669 	int rv = 0;
   11670 
   11671 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11672 		device_xname(dev), __func__));
   11673 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11674 	if ((sc->sc_type == WM_T_PCH)
   11675 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11676 		device_printf(dev,
   11677 		    "Attempting to access page %d while gig enabled.\n", page);
   11678 	}
   11679 
   11680 	if (!page_set) {
   11681 		/* Enable access to PHY wakeup registers */
   11682 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11683 		if (rv != 0) {
   11684 			device_printf(dev,
   11685 			    "%s: Could not enable PHY wakeup reg access\n",
   11686 			    __func__);
   11687 			return rv;
   11688 		}
   11689 	}
   11690 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11691 		device_xname(sc->sc_dev), __func__, page, regnum));
   11692 
    11693 	/*
    11694 	 * 3) and 4) Access the PHY wakeup register using the address
    11695 	 * opcode and then the data opcode, per the procedure above.
    11696 	 */
   11697 
   11698 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11699 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11700 	if (rv != 0)
   11701 		return rv;
   11702 
   11703 	if (rd) {
   11704 		/* Read the Wakeup register page value using opcode 0x12 */
   11705 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11706 	} else {
   11707 		/* Write the Wakeup register page value using opcode 0x12 */
   11708 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11709 	}
   11710 	if (rv != 0)
   11711 		return rv;
   11712 
   11713 	if (!page_set)
   11714 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11715 
   11716 	return rv;
   11717 }
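
          /*
           * Usage sketch (example only): a locked read of one wakeup
           * register through the 5-step procedure above.  "dev" and
           * "offset" are hypothetical; BM_PHY_REG_PAGE(offset) must equal
           * BM_WUC_PAGE for callers such as wm_gmii_bm_readreg() to route
           * the access here.
           */
          #if 0
          	uint16_t data;
          	int rv;

          	/* rd = true, page_set = false: the helper manages 769.17.2. */
          	rv = wm_access_phy_wakeup_reg_bm(dev, offset, &data, true, false);
          #endif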
   11718 
   11719 /*
   11720  * wm_gmii_hv_readreg:	[mii interface function]
   11721  *
    11722  *	Read a PHY register on an HV PHY (PCH and newer).
   11723  * This could be handled by the PHY layer if we didn't have to lock the
   11724  * resource ...
   11725  */
   11726 static int
   11727 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11728 {
   11729 	struct wm_softc *sc = device_private(dev);
   11730 	int rv;
   11731 
   11732 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11733 		device_xname(dev), __func__));
   11734 	if (sc->phy.acquire(sc)) {
   11735 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11736 		return -1;
   11737 	}
   11738 
   11739 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11740 	sc->phy.release(sc);
   11741 	return rv;
   11742 }
   11743 
   11744 static int
   11745 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11746 {
   11747 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11748 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11749 	int rv;
   11750 
   11751 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11752 
   11753 	/* Page 800 works differently than the rest so it has its own func */
   11754 	if (page == BM_WUC_PAGE)
   11755 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11756 
    11757 	/*
    11758 	 * Pages lower than 768 work differently from the rest and are
    11759 	 * not handled here yet.
    11760 	 */
   11761 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11762 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11763 		return -1;
   11764 	}
   11765 
   11766 	/*
   11767 	 * XXX I21[789] documents say that the SMBus Address register is at
   11768 	 * PHY address 01, Page 0 (not 768), Register 26.
   11769 	 */
   11770 	if (page == HV_INTC_FC_PAGE_START)
   11771 		page = 0;
   11772 
   11773 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11774 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
   11775 		    page << BME1000_PAGE_SHIFT);
   11776 		if (rv != 0)
   11777 			return rv;
   11778 	}
   11779 
   11780 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11781 }
   11782 
   11783 /*
   11784  * wm_gmii_hv_writereg:	[mii interface function]
   11785  *
    11786  *	Write a PHY register on an HV PHY (PCH and newer).
   11787  * This could be handled by the PHY layer if we didn't have to lock the
   11788  * resource ...
   11789  */
   11790 static int
   11791 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11792 {
   11793 	struct wm_softc *sc = device_private(dev);
   11794 	int rv;
   11795 
   11796 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
   11797 		device_xname(dev), __func__));
   11798 
   11799 	if (sc->phy.acquire(sc)) {
   11800 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11801 		return -1;
   11802 	}
   11803 
   11804 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11805 	sc->phy.release(sc);
   11806 
   11807 	return rv;
   11808 }
   11809 
   11810 static int
   11811 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11812 {
   11813 	struct wm_softc *sc = device_private(dev);
   11814 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11815 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11816 	int rv;
   11817 
   11818 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11819 
   11820 	/* Page 800 works differently than the rest so it has its own func */
   11821 	if (page == BM_WUC_PAGE)
   11822 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11823 		    false);
   11824 
    11825 	/*
    11826 	 * Pages lower than 768 work differently from the rest and are
    11827 	 * not handled here yet.
    11828 	 */
   11829 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11830 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11831 		return -1;
   11832 	}
   11833 
   11834 	{
   11835 		/*
   11836 		 * XXX I21[789] documents say that the SMBus Address register
   11837 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11838 		 */
   11839 		if (page == HV_INTC_FC_PAGE_START)
   11840 			page = 0;
   11841 
   11842 		/*
   11843 		 * XXX Workaround MDIO accesses being disabled after entering
   11844 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11845 		 * register is set)
   11846 		 */
   11847 		if (sc->sc_phytype == WMPHY_82578) {
   11848 			struct mii_softc *child;
   11849 
   11850 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11851 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11852 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11853 			    && ((val & (1 << 11)) != 0)) {
   11854 				device_printf(dev, "XXX need workaround\n");
   11855 			}
   11856 		}
   11857 
   11858 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11859 			rv = wm_gmii_mdic_writereg(dev, 1,
   11860 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11861 			if (rv != 0)
   11862 				return rv;
   11863 		}
   11864 	}
   11865 
   11866 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11867 }
   11868 
   11869 /*
   11870  * wm_gmii_82580_readreg:	[mii interface function]
   11871  *
   11872  *	Read a PHY register on the 82580 and I350.
   11873  * This could be handled by the PHY layer if we didn't have to lock the
   11874  * resource ...
   11875  */
   11876 static int
   11877 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11878 {
   11879 	struct wm_softc *sc = device_private(dev);
   11880 	int rv;
   11881 
   11882 	if (sc->phy.acquire(sc) != 0) {
   11883 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11884 		return -1;
   11885 	}
   11886 
   11887 #ifdef DIAGNOSTIC
   11888 	if (reg > MII_ADDRMASK) {
   11889 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11890 		    __func__, sc->sc_phytype, reg);
   11891 		reg &= MII_ADDRMASK;
   11892 	}
   11893 #endif
   11894 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11895 
   11896 	sc->phy.release(sc);
   11897 	return rv;
   11898 }
   11899 
   11900 /*
   11901  * wm_gmii_82580_writereg:	[mii interface function]
   11902  *
   11903  *	Write a PHY register on the 82580 and I350.
   11904  * This could be handled by the PHY layer if we didn't have to lock the
   11905  * resource ...
   11906  */
   11907 static int
   11908 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11909 {
   11910 	struct wm_softc *sc = device_private(dev);
   11911 	int rv;
   11912 
   11913 	if (sc->phy.acquire(sc) != 0) {
   11914 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11915 		return -1;
   11916 	}
   11917 
   11918 #ifdef DIAGNOSTIC
   11919 	if (reg > MII_ADDRMASK) {
   11920 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11921 		    __func__, sc->sc_phytype, reg);
   11922 		reg &= MII_ADDRMASK;
   11923 	}
   11924 #endif
   11925 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11926 
   11927 	sc->phy.release(sc);
   11928 	return rv;
   11929 }
   11930 
   11931 /*
   11932  * wm_gmii_gs40g_readreg:	[mii interface function]
   11933  *
    11934  *	Read a PHY register on the I210 and I211.
   11935  * This could be handled by the PHY layer if we didn't have to lock the
   11936  * resource ...
   11937  */
   11938 static int
   11939 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11940 {
   11941 	struct wm_softc *sc = device_private(dev);
   11942 	int page, offset;
   11943 	int rv;
   11944 
   11945 	/* Acquire semaphore */
   11946 	if (sc->phy.acquire(sc)) {
   11947 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11948 		return -1;
   11949 	}
   11950 
   11951 	/* Page select */
   11952 	page = reg >> GS40G_PAGE_SHIFT;
   11953 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11954 	if (rv != 0)
   11955 		goto release;
   11956 
   11957 	/* Read reg */
   11958 	offset = reg & GS40G_OFFSET_MASK;
   11959 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11960 
   11961 release:
   11962 	sc->phy.release(sc);
   11963 	return rv;
   11964 }
   11965 
   11966 /*
   11967  * wm_gmii_gs40g_writereg:	[mii interface function]
   11968  *
   11969  *	Write a PHY register on the I210 and I211.
   11970  * This could be handled by the PHY layer if we didn't have to lock the
   11971  * resource ...
   11972  */
   11973 static int
   11974 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11975 {
   11976 	struct wm_softc *sc = device_private(dev);
   11977 	uint16_t page;
   11978 	int offset, rv;
   11979 
   11980 	/* Acquire semaphore */
   11981 	if (sc->phy.acquire(sc)) {
   11982 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11983 		return -1;
   11984 	}
   11985 
   11986 	/* Page select */
   11987 	page = reg >> GS40G_PAGE_SHIFT;
   11988 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11989 	if (rv != 0)
   11990 		goto release;
   11991 
   11992 	/* Write reg */
   11993 	offset = reg & GS40G_OFFSET_MASK;
   11994 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11995 
   11996 release:
   11997 	/* Release semaphore */
   11998 	sc->phy.release(sc);
   11999 	return rv;
   12000 }
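
          /*
           * Example only: the GS40G register argument packs the page
           * number above the register offset, so both functions above
           * split it as sketched below.  The values are illustrative.
           */
          #if 0
          	int reg = (2 << GS40G_PAGE_SHIFT) | 21;	/* page 2, register 21 */
          	int page = reg >> GS40G_PAGE_SHIFT;	/* 2, to GS40G_PAGE_SELECT */
          	int offset = reg & GS40G_OFFSET_MASK;	/* 21, the MDIC register */
          #endif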
   12001 
   12002 /*
   12003  * wm_gmii_statchg:	[mii interface function]
   12004  *
   12005  *	Callback from MII layer when media changes.
   12006  */
   12007 static void
   12008 wm_gmii_statchg(struct ifnet *ifp)
   12009 {
   12010 	struct wm_softc *sc = ifp->if_softc;
   12011 	struct mii_data *mii = &sc->sc_mii;
   12012 
   12013 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   12014 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12015 	sc->sc_fcrtl &= ~FCRTL_XONE;
   12016 
   12017 	/* Get flow control negotiation result. */
   12018 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   12019 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   12020 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   12021 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   12022 	}
   12023 
   12024 	if (sc->sc_flowflags & IFM_FLOW) {
   12025 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   12026 			sc->sc_ctrl |= CTRL_TFCE;
   12027 			sc->sc_fcrtl |= FCRTL_XONE;
   12028 		}
   12029 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   12030 			sc->sc_ctrl |= CTRL_RFCE;
   12031 	}
   12032 
   12033 	if (mii->mii_media_active & IFM_FDX) {
   12034 		DPRINTF(sc, WM_DEBUG_LINK,
   12035 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   12036 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12037 	} else {
   12038 		DPRINTF(sc, WM_DEBUG_LINK,
   12039 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   12040 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12041 	}
   12042 
   12043 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12044 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12045 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   12046 						 : WMREG_FCRTL, sc->sc_fcrtl);
   12047 	if (sc->sc_type == WM_T_80003) {
   12048 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   12049 		case IFM_1000_T:
   12050 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12051 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    12052 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   12053 			break;
   12054 		default:
   12055 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   12056 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    12057 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   12058 			break;
   12059 		}
   12060 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   12061 	}
   12062 }
   12063 
   12064 /* kumeran related (80003, ICH* and PCH*) */
   12065 
   12066 /*
   12067  * wm_kmrn_readreg:
   12068  *
   12069  *	Read a kumeran register
   12070  */
   12071 static int
   12072 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   12073 {
   12074 	int rv;
   12075 
   12076 	if (sc->sc_type == WM_T_80003)
   12077 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12078 	else
   12079 		rv = sc->phy.acquire(sc);
   12080 	if (rv != 0) {
   12081 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12082 		    __func__);
   12083 		return rv;
   12084 	}
   12085 
   12086 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   12087 
   12088 	if (sc->sc_type == WM_T_80003)
   12089 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12090 	else
   12091 		sc->phy.release(sc);
   12092 
   12093 	return rv;
   12094 }
   12095 
   12096 static int
   12097 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   12098 {
   12099 
   12100 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12101 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   12102 	    KUMCTRLSTA_REN);
   12103 	CSR_WRITE_FLUSH(sc);
   12104 	delay(2);
   12105 
   12106 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   12107 
   12108 	return 0;
   12109 }
   12110 
   12111 /*
   12112  * wm_kmrn_writereg:
   12113  *
   12114  *	Write a kumeran register
   12115  */
   12116 static int
   12117 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   12118 {
   12119 	int rv;
   12120 
   12121 	if (sc->sc_type == WM_T_80003)
   12122 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12123 	else
   12124 		rv = sc->phy.acquire(sc);
   12125 	if (rv != 0) {
   12126 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   12127 		    __func__);
   12128 		return rv;
   12129 	}
   12130 
   12131 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   12132 
   12133 	if (sc->sc_type == WM_T_80003)
   12134 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   12135 	else
   12136 		sc->phy.release(sc);
   12137 
   12138 	return rv;
   12139 }
   12140 
   12141 static int
   12142 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   12143 {
   12144 
   12145 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   12146 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   12147 
   12148 	return 0;
   12149 }
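
          /*
           * Example only: one KUMCTRLSTA word carries the whole Kumeran
           * transaction.  A read places the register offset in the upper
           * field and sets KUMCTRLSTA_REN; the result is taken from the
           * low 16 bits after the short settle delay used above.
           */
          #if 0
          	uint32_t cmd = ((KUMCTRLSTA_OFFSET_HD_CTRL << KUMCTRLSTA_OFFSET_SHIFT)
          	    & KUMCTRLSTA_OFFSET) | KUMCTRLSTA_REN;
          #endif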
   12150 
   12151 /*
   12152  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   12153  * This access method is different from IEEE MMD.
   12154  */
   12155 static int
   12156 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   12157 {
   12158 	struct wm_softc *sc = device_private(dev);
   12159 	int rv;
   12160 
   12161 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   12162 	if (rv != 0)
   12163 		return rv;
   12164 
   12165 	if (rd)
   12166 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   12167 	else
   12168 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   12169 	return rv;
   12170 }
   12171 
   12172 static int
   12173 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   12174 {
   12175 
   12176 	return wm_access_emi_reg_locked(dev, reg, val, true);
   12177 }
   12178 
   12179 static int
   12180 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   12181 {
   12182 
   12183 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   12184 }
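
          /*
           * Usage sketch (example only): an EMI access is two ordinary PHY
           * accesses at PHY address 2, one to latch the EMI register
           * number and one to move the data.  "dev" and "emi_reg" are
           * hypothetical.
           */
          #if 0
          	uint16_t data;
          	int rv;

          	rv = wm_read_emi_reg_locked(dev, emi_reg, &data);
          #endif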
   12185 
   12186 /* SGMII related */
   12187 
   12188 /*
   12189  * wm_sgmii_uses_mdio
   12190  *
   12191  * Check whether the transaction is to the internal PHY or the external
   12192  * MDIO interface. Return true if it's MDIO.
   12193  */
   12194 static bool
   12195 wm_sgmii_uses_mdio(struct wm_softc *sc)
   12196 {
   12197 	uint32_t reg;
   12198 	bool ismdio = false;
   12199 
   12200 	switch (sc->sc_type) {
   12201 	case WM_T_82575:
   12202 	case WM_T_82576:
   12203 		reg = CSR_READ(sc, WMREG_MDIC);
   12204 		ismdio = ((reg & MDIC_DEST) != 0);
   12205 		break;
   12206 	case WM_T_82580:
   12207 	case WM_T_I350:
   12208 	case WM_T_I354:
   12209 	case WM_T_I210:
   12210 	case WM_T_I211:
   12211 		reg = CSR_READ(sc, WMREG_MDICNFG);
   12212 		ismdio = ((reg & MDICNFG_DEST) != 0);
   12213 		break;
   12214 	default:
   12215 		break;
   12216 	}
   12217 
   12218 	return ismdio;
   12219 }
   12220 
   12221 /* Setup internal SGMII PHY for SFP */
   12222 static void
   12223 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
   12224 {
   12225 	uint16_t id1, id2, phyreg;
   12226 	int i, rv;
   12227 
   12228 	if (((sc->sc_flags & WM_F_SGMII) == 0)
   12229 	    || ((sc->sc_flags & WM_F_SFP) == 0))
   12230 		return;
   12231 
   12232 	for (i = 0; i < MII_NPHY; i++) {
   12233 		sc->phy.no_errprint = true;
   12234 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
   12235 		if (rv != 0)
   12236 			continue;
   12237 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
   12238 		if (rv != 0)
   12239 			continue;
   12240 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
   12241 			continue;
   12242 		sc->phy.no_errprint = false;
   12243 
   12244 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
   12245 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
   12246 		phyreg |= ESSR_SGMII_WOC_COPPER;
   12247 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
   12248 		break;
   12249 	}
   12250 
   12252 
   12253 /*
   12254  * wm_sgmii_readreg:	[mii interface function]
   12255  *
   12256  *	Read a PHY register on the SGMII
   12257  * This could be handled by the PHY layer if we didn't have to lock the
   12258  * resource ...
   12259  */
   12260 static int
   12261 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   12262 {
   12263 	struct wm_softc *sc = device_private(dev);
   12264 	int rv;
   12265 
   12266 	if (sc->phy.acquire(sc)) {
   12267 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12268 		return -1;
   12269 	}
   12270 
   12271 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   12272 
   12273 	sc->phy.release(sc);
   12274 	return rv;
   12275 }
   12276 
   12277 static int
   12278 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   12279 {
   12280 	struct wm_softc *sc = device_private(dev);
   12281 	uint32_t i2ccmd;
   12282 	int i, rv = 0;
   12283 
   12284 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12285 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12286 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12287 
   12288 	/* Poll the ready bit */
   12289 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12290 		delay(50);
   12291 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12292 		if (i2ccmd & I2CCMD_READY)
   12293 			break;
   12294 	}
   12295 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12296 		device_printf(dev, "I2CCMD Read did not complete\n");
   12297 		rv = ETIMEDOUT;
   12298 	}
   12299 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12300 		if (!sc->phy.no_errprint)
   12301 			device_printf(dev, "I2CCMD Error bit set\n");
   12302 		rv = EIO;
   12303 	}
   12304 
   12305 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   12306 
   12307 	return rv;
   12308 }
   12309 
   12310 /*
   12311  * wm_sgmii_writereg:	[mii interface function]
   12312  *
   12313  *	Write a PHY register on the SGMII.
   12314  * This could be handled by the PHY layer if we didn't have to lock the
   12315  * resource ...
   12316  */
   12317 static int
   12318 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   12319 {
   12320 	struct wm_softc *sc = device_private(dev);
   12321 	int rv;
   12322 
   12323 	if (sc->phy.acquire(sc) != 0) {
   12324 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   12325 		return -1;
   12326 	}
   12327 
   12328 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   12329 
   12330 	sc->phy.release(sc);
   12331 
   12332 	return rv;
   12333 }
   12334 
   12335 static int
   12336 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   12337 {
   12338 	struct wm_softc *sc = device_private(dev);
   12339 	uint32_t i2ccmd;
   12340 	uint16_t swapdata;
   12341 	int rv = 0;
   12342 	int i;
   12343 
   12344 	/* Swap the data bytes for the I2C interface */
   12345 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   12346 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   12347 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   12348 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12349 
   12350 	/* Poll the ready bit */
   12351 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12352 		delay(50);
   12353 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12354 		if (i2ccmd & I2CCMD_READY)
   12355 			break;
   12356 	}
   12357 	if ((i2ccmd & I2CCMD_READY) == 0) {
   12358 		device_printf(dev, "I2CCMD Write did not complete\n");
   12359 		rv = ETIMEDOUT;
   12360 	}
   12361 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   12362 		device_printf(dev, "I2CCMD Error bit set\n");
   12363 		rv = EIO;
   12364 	}
   12365 
   12366 	return rv;
   12367 }
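
          /*
           * Sketch (example only): the 16-bit byte swap applied on both
           * the write path above and the read path, since the I2CCMD data
           * field is byte-swapped relative to the PHY register.  The
           * helper name is hypothetical.
           */
          #if 0
          static inline uint16_t
          example_i2ccmd_bswap16(uint16_t v)
          {

          	return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
          }
          #endif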
   12368 
   12369 /* TBI related */
   12370 
   12371 static bool
   12372 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   12373 {
   12374 	bool sig;
   12375 
   12376 	sig = ctrl & CTRL_SWDPIN(1);
   12377 
   12378 	/*
   12379 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   12380 	 * detect a signal, 1 if they don't.
   12381 	 */
   12382 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   12383 		sig = !sig;
   12384 
   12385 	return sig;
   12386 }
   12387 
   12388 /*
   12389  * wm_tbi_mediainit:
   12390  *
   12391  *	Initialize media for use on 1000BASE-X devices.
   12392  */
   12393 static void
   12394 wm_tbi_mediainit(struct wm_softc *sc)
   12395 {
   12396 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12397 	const char *sep = "";
   12398 
   12399 	if (sc->sc_type < WM_T_82543)
   12400 		sc->sc_tipg = TIPG_WM_DFLT;
   12401 	else
   12402 		sc->sc_tipg = TIPG_LG_DFLT;
   12403 
   12404 	sc->sc_tbi_serdes_anegticks = 5;
   12405 
   12406 	/* Initialize our media structures */
   12407 	sc->sc_mii.mii_ifp = ifp;
   12408 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   12409 
   12410 	ifp->if_baudrate = IF_Gbps(1);
   12411 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   12412 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12413 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12414 		    wm_serdes_mediachange, wm_serdes_mediastatus,
   12415 		    sc->sc_core_lock);
   12416 	} else {
   12417 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
   12418 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
   12419 	}
   12420 
   12421 	/*
   12422 	 * SWD Pins:
   12423 	 *
   12424 	 *	0 = Link LED (output)
   12425 	 *	1 = Loss Of Signal (input)
   12426 	 */
   12427 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   12428 
   12429 	/* XXX Perhaps this is only for TBI */
   12430 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12431 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   12432 
   12433 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   12434 		sc->sc_ctrl &= ~CTRL_LRST;
   12435 
   12436 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12437 
   12438 #define	ADD(ss, mm, dd)							\
   12439 do {									\
   12440 	aprint_normal("%s%s", sep, ss);					\
   12441 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12442 	sep = ", ";							\
   12443 } while (/*CONSTCOND*/0)
   12444 
   12445 	aprint_normal_dev(sc->sc_dev, "");
   12446 
   12447 	if (sc->sc_type == WM_T_I354) {
   12448 		uint32_t status;
   12449 
   12450 		status = CSR_READ(sc, WMREG_STATUS);
   12451 		if (((status & STATUS_2P5_SKU) != 0)
   12452 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12453 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   12454 		} else
   12455 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   12456 	} else if (sc->sc_type == WM_T_82545) {
   12457 		/* Only 82545 is LX (XXX except SFP) */
   12458 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12459 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12460 	} else if (sc->sc_sfptype != 0) {
   12461 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12462 		switch (sc->sc_sfptype) {
   12463 		default:
   12464 		case SFF_SFP_ETH_FLAGS_1000SX:
   12465 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12466 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12467 			break;
   12468 		case SFF_SFP_ETH_FLAGS_1000LX:
   12469 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12470 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12471 			break;
   12472 		case SFF_SFP_ETH_FLAGS_1000CX:
   12473 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12474 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12475 			break;
   12476 		case SFF_SFP_ETH_FLAGS_1000T:
   12477 			ADD("1000baseT", IFM_1000_T, 0);
   12478 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12479 			break;
   12480 		case SFF_SFP_ETH_FLAGS_100FX:
   12481 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12482 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12483 			break;
   12484 		}
   12485 	} else {
   12486 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12487 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12488 	}
   12489 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12490 	aprint_normal("\n");
   12491 
   12492 #undef ADD
   12493 
   12494 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12495 }
   12496 
   12497 /*
   12498  * wm_tbi_mediachange:	[ifmedia interface function]
   12499  *
   12500  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12501  */
   12502 static int
   12503 wm_tbi_mediachange(struct ifnet *ifp)
   12504 {
   12505 	struct wm_softc *sc = ifp->if_softc;
   12506 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12507 	uint32_t status, ctrl;
   12508 	bool signal;
   12509 	int i;
   12510 
   12511 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12512 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12513 		/* XXX need some work for >= 82571 and < 82575 */
   12514 		if (sc->sc_type < WM_T_82575)
   12515 			return 0;
   12516 	}
   12517 
   12518 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12519 	    || (sc->sc_type >= WM_T_82575))
   12520 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12521 
   12522 	sc->sc_ctrl &= ~CTRL_LRST;
   12523 	sc->sc_txcw = TXCW_ANE;
   12524 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12525 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12526 	else if (ife->ifm_media & IFM_FDX)
   12527 		sc->sc_txcw |= TXCW_FD;
   12528 	else
   12529 		sc->sc_txcw |= TXCW_HD;
   12530 
   12531 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12532 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12533 
   12534 	DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12535 		device_xname(sc->sc_dev), sc->sc_txcw));
   12536 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12537 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12538 	CSR_WRITE_FLUSH(sc);
   12539 	delay(1000);
   12540 
   12541 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12542 	signal = wm_tbi_havesignal(sc, ctrl);
   12543 
   12544 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12545 		signal));
   12546 
   12547 	if (signal) {
   12548 		/* Have signal; wait for the link to come up. */
   12549 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12550 			delay(10000);
   12551 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12552 				break;
   12553 		}
   12554 
   12555 		DPRINTF(sc, WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12556 			device_xname(sc->sc_dev), i));
   12557 
   12558 		status = CSR_READ(sc, WMREG_STATUS);
   12559 		DPRINTF(sc, WM_DEBUG_LINK,
   12560 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
   12561 			__PRIxBIT "\n",
   12562 			device_xname(sc->sc_dev), status, STATUS_LU));
   12563 		if (status & STATUS_LU) {
   12564 			/* Link is up. */
   12565 			DPRINTF(sc, WM_DEBUG_LINK,
   12566 			    ("%s: LINK: set media -> link up %s\n",
   12567 				device_xname(sc->sc_dev),
   12568 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12569 
   12570 			/*
   12571 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12572 			 * so we should update sc->sc_ctrl
   12573 			 */
   12574 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12575 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12576 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12577 			if (status & STATUS_FD)
   12578 				sc->sc_tctl |=
   12579 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12580 			else
   12581 				sc->sc_tctl |=
   12582 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12583 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12584 				sc->sc_fcrtl |= FCRTL_XONE;
   12585 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12586 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12587 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12588 			sc->sc_tbi_linkup = 1;
   12589 		} else {
   12590 			if (i == WM_LINKUP_TIMEOUT)
   12591 				wm_check_for_link(sc);
   12592 			/* Link is down. */
   12593 			DPRINTF(sc, WM_DEBUG_LINK,
   12594 			    ("%s: LINK: set media -> link down\n",
   12595 				device_xname(sc->sc_dev)));
   12596 			sc->sc_tbi_linkup = 0;
   12597 		}
   12598 	} else {
   12599 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12600 			device_xname(sc->sc_dev)));
   12601 		sc->sc_tbi_linkup = 0;
   12602 	}
   12603 
   12604 	wm_tbi_serdes_set_linkled(sc);
   12605 
   12606 	return 0;
   12607 }
   12608 
   12609 /*
   12610  * wm_tbi_mediastatus:	[ifmedia interface function]
   12611  *
   12612  *	Get the current interface media status on a 1000BASE-X device.
   12613  */
   12614 static void
   12615 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12616 {
   12617 	struct wm_softc *sc = ifp->if_softc;
   12618 	uint32_t ctrl, status;
   12619 
   12620 	ifmr->ifm_status = IFM_AVALID;
   12621 	ifmr->ifm_active = IFM_ETHER;
   12622 
   12623 	status = CSR_READ(sc, WMREG_STATUS);
   12624 	if ((status & STATUS_LU) == 0) {
   12625 		ifmr->ifm_active |= IFM_NONE;
   12626 		return;
   12627 	}
   12628 
   12629 	ifmr->ifm_status |= IFM_ACTIVE;
   12630 	/* Only 82545 is LX */
   12631 	if (sc->sc_type == WM_T_82545)
   12632 		ifmr->ifm_active |= IFM_1000_LX;
   12633 	else
   12634 		ifmr->ifm_active |= IFM_1000_SX;
   12635 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12636 		ifmr->ifm_active |= IFM_FDX;
   12637 	else
   12638 		ifmr->ifm_active |= IFM_HDX;
   12639 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12640 	if (ctrl & CTRL_RFCE)
   12641 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12642 	if (ctrl & CTRL_TFCE)
   12643 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12644 }
   12645 
   12646 /* XXX TBI only */
   12647 static int
   12648 wm_check_for_link(struct wm_softc *sc)
   12649 {
   12650 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12651 	uint32_t rxcw;
   12652 	uint32_t ctrl;
   12653 	uint32_t status;
   12654 	bool signal;
   12655 
   12656 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
   12657 		device_xname(sc->sc_dev), __func__));
   12658 
   12659 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12660 		/* XXX need some work for >= 82571 */
   12661 		if (sc->sc_type >= WM_T_82571) {
   12662 			sc->sc_tbi_linkup = 1;
   12663 			return 0;
   12664 		}
   12665 	}
   12666 
   12667 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12668 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12669 	status = CSR_READ(sc, WMREG_STATUS);
   12670 	signal = wm_tbi_havesignal(sc, ctrl);
   12671 
   12672 	DPRINTF(sc, WM_DEBUG_LINK,
   12673 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12674 		device_xname(sc->sc_dev), __func__, signal,
   12675 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12676 
   12677 	/*
   12678 	 * SWDPIN   LU RXCW
   12679 	 *	0    0	  0
   12680 	 *	0    0	  1	(should not happen)
   12681 	 *	0    1	  0	(should not happen)
   12682 	 *	0    1	  1	(should not happen)
   12683 	 *	1    0	  0	Disable autonego and force linkup
   12684 	 *	1    0	  1	got /C/ but not linkup yet
   12685 	 *	1    1	  0	(linkup)
   12686 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12687 	 *
   12688 	 */
   12689 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12690 		DPRINTF(sc, WM_DEBUG_LINK,
   12691 		    ("%s: %s: force linkup and fullduplex\n",
   12692 			device_xname(sc->sc_dev), __func__));
   12693 		sc->sc_tbi_linkup = 0;
   12694 		/* Disable auto-negotiation in the TXCW register */
   12695 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12696 
   12697 		/*
   12698 		 * Force link-up and also force full-duplex.
   12699 		 *
    12700 		 * NOTE: CTRL will have updated TFCE and RFCE automatically,
    12701 		 * so we should update sc->sc_ctrl
   12702 		 */
   12703 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12704 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12705 	} else if (((status & STATUS_LU) != 0)
   12706 	    && ((rxcw & RXCW_C) != 0)
   12707 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12708 		sc->sc_tbi_linkup = 1;
   12709 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12710 			device_xname(sc->sc_dev),
   12711 			__func__));
   12712 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12713 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12714 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    12715 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12716 			device_xname(sc->sc_dev), __func__));
   12717 	} else {
   12718 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12719 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12720 			status));
   12721 	}
   12722 
   12723 	return 0;
   12724 }
   12725 
   12726 /*
   12727  * wm_tbi_tick:
   12728  *
   12729  *	Check the link on TBI devices.
   12730  *	This function acts as mii_tick().
   12731  */
   12732 static void
   12733 wm_tbi_tick(struct wm_softc *sc)
   12734 {
   12735 	struct mii_data *mii = &sc->sc_mii;
   12736 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12737 	uint32_t status;
   12738 
   12739 	KASSERT(WM_CORE_LOCKED(sc));
   12740 
   12741 	status = CSR_READ(sc, WMREG_STATUS);
   12742 
   12743 	/* XXX is this needed? */
   12744 	(void)CSR_READ(sc, WMREG_RXCW);
   12745 	(void)CSR_READ(sc, WMREG_CTRL);
   12746 
	/* Set link status */
   12748 	if ((status & STATUS_LU) == 0) {
   12749 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12750 			device_xname(sc->sc_dev)));
   12751 		sc->sc_tbi_linkup = 0;
   12752 	} else if (sc->sc_tbi_linkup == 0) {
   12753 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12754 			device_xname(sc->sc_dev),
   12755 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12756 		sc->sc_tbi_linkup = 1;
   12757 		sc->sc_tbi_serdes_ticks = 0;
   12758 	}
   12759 
   12760 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12761 		goto setled;
   12762 
   12763 	if ((status & STATUS_LU) == 0) {
   12764 		sc->sc_tbi_linkup = 0;
   12765 		/* If the timer expired, retry autonegotiation */
   12766 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12767 		    && (++sc->sc_tbi_serdes_ticks
   12768 			>= sc->sc_tbi_serdes_anegticks)) {
   12769 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12770 				device_xname(sc->sc_dev), __func__));
   12771 			sc->sc_tbi_serdes_ticks = 0;
   12772 			/*
   12773 			 * Reset the link, and let autonegotiation do
   12774 			 * its thing
   12775 			 */
   12776 			sc->sc_ctrl |= CTRL_LRST;
   12777 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12778 			CSR_WRITE_FLUSH(sc);
   12779 			delay(1000);
   12780 			sc->sc_ctrl &= ~CTRL_LRST;
   12781 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12782 			CSR_WRITE_FLUSH(sc);
   12783 			delay(1000);
   12784 			CSR_WRITE(sc, WMREG_TXCW,
   12785 			    sc->sc_txcw & ~TXCW_ANE);
   12786 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12787 		}
   12788 	}
   12789 
   12790 setled:
   12791 	wm_tbi_serdes_set_linkled(sc);
   12792 }
   12793 
   12794 /* SERDES related */
   12795 static void
   12796 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12797 {
   12798 	uint32_t reg;
   12799 
   12800 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12801 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12802 		return;
   12803 
   12804 	/* Enable PCS to turn on link */
   12805 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12806 	reg |= PCS_CFG_PCS_EN;
   12807 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12808 
   12809 	/* Power up the laser */
   12810 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12811 	reg &= ~CTRL_EXT_SWDPIN(3);
   12812 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12813 
   12814 	/* Flush the write to verify completion */
   12815 	CSR_WRITE_FLUSH(sc);
   12816 	delay(1000);
   12817 }
   12818 
   12819 static int
   12820 wm_serdes_mediachange(struct ifnet *ifp)
   12821 {
   12822 	struct wm_softc *sc = ifp->if_softc;
   12823 	bool pcs_autoneg = true; /* XXX */
   12824 	uint32_t ctrl_ext, pcs_lctl, reg;
   12825 
   12826 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12827 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12828 		return 0;
   12829 
   12830 	/* XXX Currently, this function is not called on 8257[12] */
   12831 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12832 	    || (sc->sc_type >= WM_T_82575))
   12833 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12834 
	/* Power on the SFP cage if present */
   12836 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12837 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12838 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12839 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12840 
   12841 	sc->sc_ctrl |= CTRL_SLU;
   12842 
   12843 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   12844 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12845 
   12846 		reg = CSR_READ(sc, WMREG_CONNSW);
   12847 		reg |= CONNSW_ENRGSRC;
   12848 		CSR_WRITE(sc, WMREG_CONNSW, reg);
   12849 	}
   12850 
   12851 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12852 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12853 	case CTRL_EXT_LINK_MODE_SGMII:
		/* SGMII mode lets the PHY handle forcing speed/duplex */
   12855 		pcs_autoneg = true;
		/* The autoneg timeout should be disabled for SGMII mode */
   12857 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12858 		break;
   12859 	case CTRL_EXT_LINK_MODE_1000KX:
   12860 		pcs_autoneg = false;
   12861 		/* FALLTHROUGH */
   12862 	default:
   12863 		if ((sc->sc_type == WM_T_82575)
   12864 		    || (sc->sc_type == WM_T_82576)) {
   12865 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12866 				pcs_autoneg = false;
   12867 		}
   12868 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12869 		    | CTRL_FRCFDX;
   12870 
   12871 		/* Set speed of 1000/Full if speed/duplex is forced */
   12872 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12873 	}
   12874 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12875 
   12876 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12877 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12878 
   12879 	if (pcs_autoneg) {
   12880 		/* Set PCS register for autoneg */
   12881 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12882 
   12883 		/* Disable force flow control for autoneg */
   12884 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12885 
   12886 		/* Configure flow control advertisement for autoneg */
   12887 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE; /* Advertise both */
   12890 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12891 	} else
   12892 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12893 
   12894 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12895 
   12896 	return 0;
   12897 }
   12898 
   12899 static void
   12900 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12901 {
   12902 	struct wm_softc *sc = ifp->if_softc;
   12903 	struct mii_data *mii = &sc->sc_mii;
   12904 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12905 	uint32_t pcs_adv, pcs_lpab, reg;
   12906 
   12907 	ifmr->ifm_status = IFM_AVALID;
   12908 	ifmr->ifm_active = IFM_ETHER;
   12909 
   12910 	/* Check PCS */
   12911 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12912 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12913 		ifmr->ifm_active |= IFM_NONE;
   12914 		sc->sc_tbi_linkup = 0;
   12915 		goto setled;
   12916 	}
   12917 
   12918 	sc->sc_tbi_linkup = 1;
   12919 	ifmr->ifm_status |= IFM_ACTIVE;
   12920 	if (sc->sc_type == WM_T_I354) {
   12921 		uint32_t status;
   12922 
   12923 		status = CSR_READ(sc, WMREG_STATUS);
   12924 		if (((status & STATUS_2P5_SKU) != 0)
   12925 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12926 			ifmr->ifm_active |= IFM_2500_KX;
   12927 		} else
   12928 			ifmr->ifm_active |= IFM_1000_KX;
   12929 	} else {
   12930 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12931 		case PCS_LSTS_SPEED_10:
   12932 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12933 			break;
   12934 		case PCS_LSTS_SPEED_100:
   12935 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12936 			break;
   12937 		case PCS_LSTS_SPEED_1000:
   12938 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12939 			break;
   12940 		default:
   12941 			device_printf(sc->sc_dev, "Unknown speed\n");
   12942 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12943 			break;
   12944 		}
   12945 	}
   12946 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   12947 	if ((reg & PCS_LSTS_FDX) != 0)
   12948 		ifmr->ifm_active |= IFM_FDX;
   12949 	else
   12950 		ifmr->ifm_active |= IFM_HDX;
   12951 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12952 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12953 		/* Check flow */
   12954 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12955 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12956 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12957 			goto setled;
   12958 		}
   12959 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12960 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12961 		DPRINTF(sc, WM_DEBUG_LINK,
   12962 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   12963 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12964 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12965 			mii->mii_media_active |= IFM_FLOW
   12966 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12967 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12968 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12969 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12970 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12971 			mii->mii_media_active |= IFM_FLOW
   12972 			    | IFM_ETH_TXPAUSE;
   12973 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12974 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12975 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12976 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12977 			mii->mii_media_active |= IFM_FLOW
   12978 			    | IFM_ETH_RXPAUSE;
   12979 		}
   12980 	}
   12981 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12982 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12983 setled:
   12984 	wm_tbi_serdes_set_linkled(sc);
   12985 }
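
/*
 * The three-way pause test in wm_serdes_mediastatus() is the standard
 * IEEE 802.3 flow control resolution: both sides symmetric -> TX and RX
 * pause; local asymmetric-only against a symmetric+asymmetric partner
 * -> TX pause; local symmetric+asymmetric against an asymmetric-only
 * partner -> RX pause.  A compact sketch of the same table (this
 * helper is hypothetical, not part of the driver, and returns only the
 * IFM_ETH_* pause flags):
 */
#if 0
static int
wm_pcs_resolve_pause(bool l_sym, bool l_asym, bool p_sym, bool p_asym)
{

	if (l_sym && p_sym)
		return IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	if (!l_sym && l_asym && p_sym && p_asym)
		return IFM_ETH_TXPAUSE;
	if (l_sym && l_asym && !p_sym && p_asym)
		return IFM_ETH_RXPAUSE;
	return 0;
}
#endif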
   12986 
   12987 /*
   12988  * wm_serdes_tick:
   12989  *
   12990  *	Check the link on serdes devices.
   12991  */
   12992 static void
   12993 wm_serdes_tick(struct wm_softc *sc)
   12994 {
   12995 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12996 	struct mii_data *mii = &sc->sc_mii;
   12997 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12998 	uint32_t reg;
   12999 
   13000 	KASSERT(WM_CORE_LOCKED(sc));
   13001 
   13002 	mii->mii_media_status = IFM_AVALID;
   13003 	mii->mii_media_active = IFM_ETHER;
   13004 
   13005 	/* Check PCS */
   13006 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   13007 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   13008 		mii->mii_media_status |= IFM_ACTIVE;
   13009 		sc->sc_tbi_linkup = 1;
   13010 		sc->sc_tbi_serdes_ticks = 0;
   13011 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   13012 		if ((reg & PCS_LSTS_FDX) != 0)
   13013 			mii->mii_media_active |= IFM_FDX;
   13014 		else
   13015 			mii->mii_media_active |= IFM_HDX;
   13016 	} else {
   13017 		mii->mii_media_status |= IFM_NONE;
   13018 		sc->sc_tbi_linkup = 0;
   13019 		/* If the timer expired, retry autonegotiation */
   13020 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   13021 		    && (++sc->sc_tbi_serdes_ticks
   13022 			>= sc->sc_tbi_serdes_anegticks)) {
   13023 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   13024 				device_xname(sc->sc_dev), __func__));
   13025 			sc->sc_tbi_serdes_ticks = 0;
   13026 			/* XXX */
   13027 			wm_serdes_mediachange(ifp);
   13028 		}
   13029 	}
   13030 
   13031 	wm_tbi_serdes_set_linkled(sc);
   13032 }
   13033 
   13034 /* SFP related */
   13035 
   13036 static int
   13037 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   13038 {
   13039 	uint32_t i2ccmd;
   13040 	int i;
   13041 
   13042 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   13043 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   13044 
   13045 	/* Poll the ready bit */
   13046 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   13047 		delay(50);
   13048 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   13049 		if (i2ccmd & I2CCMD_READY)
   13050 			break;
   13051 	}
   13052 	if ((i2ccmd & I2CCMD_READY) == 0)
   13053 		return -1;
   13054 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   13055 		return -1;
   13056 
   13057 	*data = i2ccmd & 0x00ff;
   13058 
   13059 	return 0;
   13060 }
   13061 
   13062 static uint32_t
   13063 wm_sfp_get_media_type(struct wm_softc *sc)
   13064 {
   13065 	uint32_t ctrl_ext;
   13066 	uint8_t val = 0;
   13067 	int timeout = 3;
   13068 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   13069 	int rv = -1;
   13070 
   13071 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13072 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   13073 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   13074 	CSR_WRITE_FLUSH(sc);
   13075 
   13076 	/* Read SFP module data */
   13077 	while (timeout) {
   13078 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   13079 		if (rv == 0)
   13080 			break;
   13081 		delay(100*1000); /* XXX too big */
   13082 		timeout--;
   13083 	}
   13084 	if (rv != 0)
   13085 		goto out;
   13086 
   13087 	switch (val) {
   13088 	case SFF_SFP_ID_SFF:
   13089 		aprint_normal_dev(sc->sc_dev,
   13090 		    "Module/Connector soldered to board\n");
   13091 		break;
   13092 	case SFF_SFP_ID_SFP:
   13093 		sc->sc_flags |= WM_F_SFP;
   13094 		break;
   13095 	case SFF_SFP_ID_UNKNOWN:
   13096 		goto out;
   13097 	default:
   13098 		break;
   13099 	}
   13100 
   13101 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   13102 	if (rv != 0)
   13103 		goto out;
   13104 
   13105 	sc->sc_sfptype = val;
   13106 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   13107 		mediatype = WM_MEDIATYPE_SERDES;
   13108 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   13109 		sc->sc_flags |= WM_F_SGMII;
   13110 		mediatype = WM_MEDIATYPE_COPPER;
   13111 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   13112 		sc->sc_flags |= WM_F_SGMII;
   13113 		mediatype = WM_MEDIATYPE_SERDES;
   13114 	} else {
   13115 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   13116 		    __func__, sc->sc_sfptype);
   13117 		sc->sc_sfptype = 0; /* XXX unknown */
   13118 	}
   13119 
   13120 out:
   13121 	/* Restore I2C interface setting */
   13122 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13123 
   13124 	return mediatype;
   13125 }
   13126 
   13127 /*
   13128  * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
   13130  */
   13131 
/* Both SPI and Microwire */
   13133 
   13134 /*
   13135  * wm_eeprom_sendbits:
   13136  *
   13137  *	Send a series of bits to the EEPROM.
   13138  */
   13139 static void
   13140 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   13141 {
   13142 	uint32_t reg;
   13143 	int x;
   13144 
   13145 	reg = CSR_READ(sc, WMREG_EECD);
   13146 
   13147 	for (x = nbits; x > 0; x--) {
   13148 		if (bits & (1U << (x - 1)))
   13149 			reg |= EECD_DI;
   13150 		else
   13151 			reg &= ~EECD_DI;
   13152 		CSR_WRITE(sc, WMREG_EECD, reg);
   13153 		CSR_WRITE_FLUSH(sc);
   13154 		delay(2);
   13155 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13156 		CSR_WRITE_FLUSH(sc);
   13157 		delay(2);
   13158 		CSR_WRITE(sc, WMREG_EECD, reg);
   13159 		CSR_WRITE_FLUSH(sc);
   13160 		delay(2);
   13161 	}
   13162 }
   13163 
   13164 /*
   13165  * wm_eeprom_recvbits:
   13166  *
   13167  *	Receive a series of bits from the EEPROM.
   13168  */
   13169 static void
   13170 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   13171 {
   13172 	uint32_t reg, val;
   13173 	int x;
   13174 
   13175 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   13176 
   13177 	val = 0;
   13178 	for (x = nbits; x > 0; x--) {
   13179 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   13180 		CSR_WRITE_FLUSH(sc);
   13181 		delay(2);
   13182 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   13183 			val |= (1U << (x - 1));
   13184 		CSR_WRITE(sc, WMREG_EECD, reg);
   13185 		CSR_WRITE_FLUSH(sc);
   13186 		delay(2);
   13187 	}
   13188 	*valp = val;
   13189 }
   13190 
   13191 /* Microwire */
   13192 
   13193 /*
   13194  * wm_nvm_read_uwire:
   13195  *
 *	Read word(s) from the EEPROM using the Microwire protocol.
   13197  */
   13198 static int
   13199 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13200 {
   13201 	uint32_t reg, val;
   13202 	int i;
   13203 
   13204 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13205 		device_xname(sc->sc_dev), __func__));
   13206 
   13207 	if (sc->nvm.acquire(sc) != 0)
   13208 		return -1;
   13209 
   13210 	for (i = 0; i < wordcnt; i++) {
   13211 		/* Clear SK and DI. */
   13212 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   13213 		CSR_WRITE(sc, WMREG_EECD, reg);
   13214 
   13215 		/*
   13216 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   13217 		 * and Xen.
   13218 		 *
		 * We use this workaround only for the 82540 because qemu's
		 * e1000 emulation acts as an 82540.
   13221 		 */
   13222 		if (sc->sc_type == WM_T_82540) {
   13223 			reg |= EECD_SK;
   13224 			CSR_WRITE(sc, WMREG_EECD, reg);
   13225 			reg &= ~EECD_SK;
   13226 			CSR_WRITE(sc, WMREG_EECD, reg);
   13227 			CSR_WRITE_FLUSH(sc);
   13228 			delay(2);
   13229 		}
   13230 		/* XXX: end of workaround */
   13231 
   13232 		/* Set CHIP SELECT. */
   13233 		reg |= EECD_CS;
   13234 		CSR_WRITE(sc, WMREG_EECD, reg);
   13235 		CSR_WRITE_FLUSH(sc);
   13236 		delay(2);
   13237 
   13238 		/* Shift in the READ command. */
   13239 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   13240 
   13241 		/* Shift in address. */
   13242 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   13243 
   13244 		/* Shift out the data. */
   13245 		wm_eeprom_recvbits(sc, &val, 16);
   13246 		data[i] = val & 0xffff;
   13247 
   13248 		/* Clear CHIP SELECT. */
   13249 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   13250 		CSR_WRITE(sc, WMREG_EECD, reg);
   13251 		CSR_WRITE_FLUSH(sc);
   13252 		delay(2);
   13253 	}
   13254 
   13255 	sc->nvm.release(sc);
   13256 	return 0;
   13257 }
   13258 
   13259 /* SPI */
   13260 
   13261 /*
   13262  * Set SPI and FLASH related information from the EECD register.
   13263  * For 82541 and 82547, the word size is taken from EEPROM.
   13264  */
   13265 static int
   13266 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   13267 {
   13268 	int size;
   13269 	uint32_t reg;
   13270 	uint16_t data;
   13271 
   13272 	reg = CSR_READ(sc, WMREG_EECD);
   13273 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   13274 
   13275 	/* Read the size of NVM from EECD by default */
   13276 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13277 	switch (sc->sc_type) {
   13278 	case WM_T_82541:
   13279 	case WM_T_82541_2:
   13280 	case WM_T_82547:
   13281 	case WM_T_82547_2:
		/* Set a dummy value so that we can access the EEPROM */
   13283 		sc->sc_nvm_wordsize = 64;
   13284 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   13285 			aprint_error_dev(sc->sc_dev,
   13286 			    "%s: failed to read EEPROM size\n", __func__);
   13287 		}
   13288 		reg = data;
   13289 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   13290 		if (size == 0)
   13291 			size = 6; /* 64 word size */
   13292 		else
   13293 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   13294 		break;
   13295 	case WM_T_80003:
   13296 	case WM_T_82571:
   13297 	case WM_T_82572:
   13298 	case WM_T_82573: /* SPI case */
   13299 	case WM_T_82574: /* SPI case */
   13300 	case WM_T_82583: /* SPI case */
   13301 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13302 		if (size > 14)
   13303 			size = 14;
   13304 		break;
   13305 	case WM_T_82575:
   13306 	case WM_T_82576:
   13307 	case WM_T_82580:
   13308 	case WM_T_I350:
   13309 	case WM_T_I354:
   13310 	case WM_T_I210:
   13311 	case WM_T_I211:
   13312 		size += NVM_WORD_SIZE_BASE_SHIFT;
   13313 		if (size > 15)
   13314 			size = 15;
   13315 		break;
   13316 	default:
   13317 		aprint_error_dev(sc->sc_dev,
		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
		return -1;
   13321 	}
   13322 
   13323 	sc->sc_nvm_wordsize = 1 << size;
   13324 
   13325 	return 0;
   13326 }
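
/*
 * Worked example of the sizing above: the "64 word size" fallback
 * implies NVM_WORD_SIZE_BASE_SHIFT is 6, so an EECD_EE_SIZE_EX field of
 * 2 on e.g. an 82571 gives size = 2 + 6 = 8 and a sc_nvm_wordsize of
 * 1 << 8 = 256 words (capped at 1 << 14 words for that family).
 */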
   13327 
   13328 /*
   13329  * wm_nvm_ready_spi:
   13330  *
   13331  *	Wait for a SPI EEPROM to be ready for commands.
   13332  */
   13333 static int
   13334 wm_nvm_ready_spi(struct wm_softc *sc)
   13335 {
   13336 	uint32_t val;
   13337 	int usec;
   13338 
   13339 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13340 		device_xname(sc->sc_dev), __func__));
   13341 
   13342 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   13343 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   13344 		wm_eeprom_recvbits(sc, &val, 8);
   13345 		if ((val & SPI_SR_RDY) == 0)
   13346 			break;
   13347 	}
   13348 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   13350 		return -1;
   13351 	}
   13352 	return 0;
   13353 }
   13354 
   13355 /*
   13356  * wm_nvm_read_spi:
   13357  *
 *	Read word(s) from the EEPROM using the SPI protocol.
   13359  */
   13360 static int
   13361 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13362 {
   13363 	uint32_t reg, val;
   13364 	int i;
   13365 	uint8_t opc;
   13366 	int rv = 0;
   13367 
   13368 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13369 		device_xname(sc->sc_dev), __func__));
   13370 
   13371 	if (sc->nvm.acquire(sc) != 0)
   13372 		return -1;
   13373 
   13374 	/* Clear SK and CS. */
   13375 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   13376 	CSR_WRITE(sc, WMREG_EECD, reg);
   13377 	CSR_WRITE_FLUSH(sc);
   13378 	delay(2);
   13379 
   13380 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   13381 		goto out;
   13382 
   13383 	/* Toggle CS to flush commands. */
   13384 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   13385 	CSR_WRITE_FLUSH(sc);
   13386 	delay(2);
   13387 	CSR_WRITE(sc, WMREG_EECD, reg);
   13388 	CSR_WRITE_FLUSH(sc);
   13389 	delay(2);
   13390 
   13391 	opc = SPI_OPC_READ;
   13392 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   13393 		opc |= SPI_OPC_A8;
   13394 
   13395 	wm_eeprom_sendbits(sc, opc, 8);
   13396 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   13397 
   13398 	for (i = 0; i < wordcnt; i++) {
   13399 		wm_eeprom_recvbits(sc, &val, 16);
   13400 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   13401 	}
   13402 
   13403 	/* Raise CS and clear SK. */
   13404 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   13405 	CSR_WRITE(sc, WMREG_EECD, reg);
   13406 	CSR_WRITE_FLUSH(sc);
   13407 	delay(2);
   13408 
   13409 out:
   13410 	sc->nvm.release(sc);
   13411 	return rv;
   13412 }
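
/*
 * The 16-bit swap in wm_nvm_read_spi() in isolation: the SPI EEPROM
 * shifts the high byte of each word out first, so the two bytes of the
 * received value are exchanged before being stored.  A standalone
 * sketch (hypothetical helper, not used by the driver):
 */
#if 0
static uint16_t
wm_spi_word_swap(uint16_t val)
{

	return ((val >> 8) & 0xff) | ((val & 0xff) << 8); /* 0x1234 -> 0x3412 */
}
#endif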
   13413 
/* Reading with EERD */
   13415 
   13416 static int
   13417 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   13418 {
   13419 	uint32_t attempts = 100000;
   13420 	uint32_t i, reg = 0;
   13421 	int32_t done = -1;
   13422 
   13423 	for (i = 0; i < attempts; i++) {
   13424 		reg = CSR_READ(sc, rw);
   13425 
   13426 		if (reg & EERD_DONE) {
   13427 			done = 0;
   13428 			break;
   13429 		}
   13430 		delay(5);
   13431 	}
   13432 
   13433 	return done;
   13434 }
   13435 
   13436 static int
   13437 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   13438 {
   13439 	int i, eerd = 0;
   13440 	int rv = 0;
   13441 
   13442 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13443 		device_xname(sc->sc_dev), __func__));
   13444 
   13445 	if (sc->nvm.acquire(sc) != 0)
   13446 		return -1;
   13447 
   13448 	for (i = 0; i < wordcnt; i++) {
   13449 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13450 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13451 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13452 		if (rv != 0) {
   13453 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   13455 			break;
   13456 		}
   13457 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13458 	}
   13459 
   13460 	sc->nvm.release(sc);
   13461 	return rv;
   13462 }
   13463 
   13464 /* Flash */
   13465 
   13466 static int
   13467 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13468 {
   13469 	uint32_t eecd;
   13470 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13471 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13472 	uint32_t nvm_dword = 0;
   13473 	uint8_t sig_byte = 0;
   13474 	int rv;
   13475 
   13476 	switch (sc->sc_type) {
   13477 	case WM_T_PCH_SPT:
   13478 	case WM_T_PCH_CNP:
   13479 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13480 		act_offset = ICH_NVM_SIG_WORD * 2;
   13481 
   13482 		/* Set bank to 0 in case flash read fails. */
   13483 		*bank = 0;
   13484 
   13485 		/* Check bank 0 */
   13486 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13487 		if (rv != 0)
   13488 			return rv;
   13489 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13490 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13491 			*bank = 0;
   13492 			return 0;
   13493 		}
   13494 
   13495 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13499 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13500 			*bank = 1;
   13501 			return 0;
   13502 		}
   13503 		aprint_error_dev(sc->sc_dev,
   13504 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13505 		return -1;
   13506 	case WM_T_ICH8:
   13507 	case WM_T_ICH9:
   13508 		eecd = CSR_READ(sc, WMREG_EECD);
   13509 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13510 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13511 			return 0;
   13512 		}
   13513 		/* FALLTHROUGH */
   13514 	default:
   13515 		/* Default to 0 */
   13516 		*bank = 0;
   13517 
   13518 		/* Check bank 0 */
   13519 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13520 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13521 			*bank = 0;
   13522 			return 0;
   13523 		}
   13524 
   13525 		/* Check bank 1 */
   13526 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13527 		    &sig_byte);
   13528 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13529 			*bank = 1;
   13530 			return 0;
   13531 		}
   13532 	}
   13533 
   13534 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13535 		device_xname(sc->sc_dev)));
   13536 	return -1;
   13537 }
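
/*
 * The bank signature test used above, in isolation: a bank is valid
 * when the byte read from its ICH_NVM_SIG_WORD location carries
 * ICH_NVM_SIG_VALUE in the bits selected by ICH_NVM_VALID_SIG_MASK, and
 * bank 1 lives one flash-bank-size further into the flash.  A sketch
 * (hypothetical helper):
 */
#if 0
static bool
wm_nvm_bank_sig_valid(uint8_t sig_byte)
{

	return (sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE;
}
#endif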
   13538 
   13539 /******************************************************************************
   13540  * This function does initial flash setup so that a new read/write/erase cycle
   13541  * can be started.
   13542  *
   13543  * sc - The pointer to the hw structure
   13544  ****************************************************************************/
   13545 static int32_t
   13546 wm_ich8_cycle_init(struct wm_softc *sc)
   13547 {
   13548 	uint16_t hsfsts;
   13549 	int32_t error = 1;
   13550 	int32_t i     = 0;
   13551 
   13552 	if (sc->sc_type >= WM_T_PCH_SPT)
   13553 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13554 	else
   13555 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13556 
	/* Check the Flash Descriptor Valid bit in the HW status */
   13558 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13559 		return error;
   13560 
	/* Clear FCERR and DAEL in the HW status by writing 1s */
   13563 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13564 
   13565 	if (sc->sc_type >= WM_T_PCH_SPT)
   13566 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13567 	else
   13568 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13569 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads as 1 after a
	 * hardware reset, which could then be used to tell whether a cycle
	 * is in progress or has been completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit so that accesses to those bits by two
	 * threads are serialized, or some way to keep two threads from
	 * starting a cycle at the same time.
	 */
   13580 
   13581 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13582 		/*
   13583 		 * There is no cycle running at present, so we can start a
   13584 		 * cycle
   13585 		 */
   13586 
   13587 		/* Begin by setting Flash Cycle Done. */
   13588 		hsfsts |= HSFSTS_DONE;
   13589 		if (sc->sc_type >= WM_T_PCH_SPT)
   13590 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13591 			    hsfsts & 0xffffUL);
   13592 		else
   13593 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13594 		error = 0;
   13595 	} else {
   13596 		/*
   13597 		 * Otherwise poll for sometime so the current cycle has a
		 * Otherwise poll for some time so the current cycle has a
   13599 		 */
   13600 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13601 			if (sc->sc_type >= WM_T_PCH_SPT)
   13602 				hsfsts = ICH8_FLASH_READ32(sc,
   13603 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13604 			else
   13605 				hsfsts = ICH8_FLASH_READ16(sc,
   13606 				    ICH_FLASH_HSFSTS);
   13607 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13608 				error = 0;
   13609 				break;
   13610 			}
   13611 			delay(1);
   13612 		}
   13613 		if (error == 0) {
   13614 			/*
			 * The previous cycle ended before we timed out;
			 * now set the Flash Cycle Done.
   13617 			 */
   13618 			hsfsts |= HSFSTS_DONE;
   13619 			if (sc->sc_type >= WM_T_PCH_SPT)
   13620 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13621 				    hsfsts & 0xffffUL);
   13622 			else
   13623 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13624 				    hsfsts);
   13625 		}
   13626 	}
   13627 	return error;
   13628 }
   13629 
   13630 /******************************************************************************
   13631  * This function starts a flash cycle and waits for its completion
   13632  *
   13633  * sc - The pointer to the hw structure
   13634  ****************************************************************************/
   13635 static int32_t
   13636 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13637 {
   13638 	uint16_t hsflctl;
   13639 	uint16_t hsfsts;
   13640 	int32_t error = 1;
   13641 	uint32_t i = 0;
   13642 
   13643 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13644 	if (sc->sc_type >= WM_T_PCH_SPT)
   13645 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13646 	else
   13647 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13648 	hsflctl |= HSFCTL_GO;
   13649 	if (sc->sc_type >= WM_T_PCH_SPT)
   13650 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13651 		    (uint32_t)hsflctl << 16);
   13652 	else
   13653 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13654 
   13655 	/* Wait till FDONE bit is set to 1 */
   13656 	do {
   13657 		if (sc->sc_type >= WM_T_PCH_SPT)
   13658 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13659 			    & 0xffffUL;
   13660 		else
   13661 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13662 		if (hsfsts & HSFSTS_DONE)
   13663 			break;
   13664 		delay(1);
   13665 		i++;
   13666 	} while (i < timeout);
   13667 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   13668 		error = 0;
   13669 
   13670 	return error;
   13671 }
   13672 
   13673 /******************************************************************************
   13674  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13675  *
   13676  * sc - The pointer to the hw structure
   13677  * index - The index of the byte or word to read.
   13678  * size - Size of data to read, 1=byte 2=word, 4=dword
   13679  * data - Pointer to the word to store the value read.
   13680  *****************************************************************************/
   13681 static int32_t
   13682 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13683     uint32_t size, uint32_t *data)
   13684 {
   13685 	uint16_t hsfsts;
   13686 	uint16_t hsflctl;
   13687 	uint32_t flash_linear_address;
   13688 	uint32_t flash_data = 0;
   13689 	int32_t error = 1;
   13690 	int32_t count = 0;
   13691 
	if (size < 1 || size > 4 || data == NULL ||
   13693 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13694 		return error;
   13695 
   13696 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13697 	    sc->sc_ich8_flash_base;
   13698 
   13699 	do {
   13700 		delay(1);
   13701 		/* Steps */
   13702 		error = wm_ich8_cycle_init(sc);
   13703 		if (error)
   13704 			break;
   13705 
   13706 		if (sc->sc_type >= WM_T_PCH_SPT)
   13707 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13708 			    >> 16;
   13709 		else
   13710 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* A BCOUNT field of size - 1 selects a 1, 2 or 4 byte access. */
   13712 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13713 		    & HSFCTL_BCOUNT_MASK;
   13714 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13715 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13716 			/*
			 * In SPT, this register is in the LAN memory space,
			 * not flash.  Therefore, only 32 bit access is
			 * supported.
   13719 			 */
   13720 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13721 			    (uint32_t)hsflctl << 16);
   13722 		} else
   13723 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13724 
   13725 		/*
		 * Write the last 24 bits of the index into the Flash Linear
		 * Address field of the Flash Address register.
   13728 		 */
   13729 		/* TODO: TBD maybe check the index against the size of flash */
   13730 
   13731 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13732 
   13733 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13734 
   13735 		/*
		 * If the cycle completed successfully, read the value out of
		 * Flash Data0, least significant byte first.  Otherwise, if
		 * FCERR is set, clear it and retry the whole sequence a few
		 * more times.
   13740 		 */
   13741 		if (error == 0) {
   13742 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13743 			if (size == 1)
   13744 				*data = (uint8_t)(flash_data & 0x000000FF);
   13745 			else if (size == 2)
   13746 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13747 			else if (size == 4)
   13748 				*data = (uint32_t)flash_data;
   13749 			break;
   13750 		} else {
   13751 			/*
   13752 			 * If we've gotten here, then things are probably
   13753 			 * completely hosed, but if the error condition is
   13754 			 * detected, it won't hurt to give it another try...
   13755 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13756 			 */
   13757 			if (sc->sc_type >= WM_T_PCH_SPT)
   13758 				hsfsts = ICH8_FLASH_READ32(sc,
   13759 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13760 			else
   13761 				hsfsts = ICH8_FLASH_READ16(sc,
   13762 				    ICH_FLASH_HSFSTS);
   13763 
   13764 			if (hsfsts & HSFSTS_ERR) {
   13765 				/* Repeat for some time before giving up. */
   13766 				continue;
   13767 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13768 				break;
   13769 		}
   13770 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13771 
   13772 	return error;
   13773 }
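
/*
 * Worked example of the control word setup in wm_read_ich8_data(): a
 * 4-byte read programs BCOUNT = size - 1 = 3 and ICH_CYCLE_READ into
 * HSFCTL, points FADDR at the byte address within the flash region,
 * and takes all 32 bits of the result from FDATA0; a 1-byte read
 * programs BCOUNT = 0 and keeps only the low byte.
 */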
   13774 
   13775 /******************************************************************************
   13776  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13777  *
   13778  * sc - pointer to wm_hw structure
   13779  * index - The index of the byte to read.
   13780  * data - Pointer to a byte to store the value read.
   13781  *****************************************************************************/
   13782 static int32_t
   13783 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13784 {
   13785 	int32_t status;
   13786 	uint32_t word = 0;
   13787 
   13788 	status = wm_read_ich8_data(sc, index, 1, &word);
   13789 	if (status == 0)
   13790 		*data = (uint8_t)word;
   13791 	else
   13792 		*data = 0;
   13793 
   13794 	return status;
   13795 }
   13796 
   13797 /******************************************************************************
   13798  * Reads a word from the NVM using the ICH8 flash access registers.
   13799  *
   13800  * sc - pointer to wm_hw structure
   13801  * index - The starting byte index of the word to read.
   13802  * data - Pointer to a word to store the value read.
   13803  *****************************************************************************/
   13804 static int32_t
   13805 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13806 {
   13807 	int32_t status;
   13808 	uint32_t word = 0;
   13809 
   13810 	status = wm_read_ich8_data(sc, index, 2, &word);
   13811 	if (status == 0)
   13812 		*data = (uint16_t)word;
   13813 	else
   13814 		*data = 0;
   13815 
   13816 	return status;
   13817 }
   13818 
   13819 /******************************************************************************
   13820  * Reads a dword from the NVM using the ICH8 flash access registers.
   13821  *
   13822  * sc - pointer to wm_hw structure
   13823  * index - The starting byte index of the word to read.
   13824  * data - Pointer to a word to store the value read.
   13825  *****************************************************************************/
   13826 static int32_t
   13827 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13828 {
   13829 	int32_t status;
   13830 
   13831 	status = wm_read_ich8_data(sc, index, 4, data);
   13832 	return status;
   13833 }
   13834 
   13835 /******************************************************************************
   13836  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13837  * register.
   13838  *
   13839  * sc - Struct containing variables accessed by shared code
   13840  * offset - offset of word in the EEPROM to read
   13841  * data - word read from the EEPROM
   13842  * words - number of words to read
   13843  *****************************************************************************/
   13844 static int
   13845 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13846 {
   13847 	int32_t	 rv = 0;
   13848 	uint32_t flash_bank = 0;
   13849 	uint32_t act_offset = 0;
   13850 	uint32_t bank_offset = 0;
   13851 	uint16_t word = 0;
   13852 	uint16_t i = 0;
   13853 
   13854 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13855 		device_xname(sc->sc_dev), __func__));
   13856 
   13857 	if (sc->nvm.acquire(sc) != 0)
   13858 		return -1;
   13859 
   13860 	/*
   13861 	 * We need to know which is the valid flash bank.  In the event
   13862 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13863 	 * managing flash_bank. So it cannot be trusted and needs
   13864 	 * to be updated with each read.
   13865 	 */
   13866 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13867 	if (rv) {
   13868 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13869 			device_xname(sc->sc_dev)));
   13870 		flash_bank = 0;
   13871 	}
   13872 
   13873 	/*
   13874 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13875 	 * size
   13876 	 */
   13877 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13878 
   13879 	for (i = 0; i < words; i++) {
   13880 		/* The NVM part needs a byte offset, hence * 2 */
   13881 		act_offset = bank_offset + ((offset + i) * 2);
   13882 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13883 		if (rv) {
   13884 			aprint_error_dev(sc->sc_dev,
   13885 			    "%s: failed to read NVM\n", __func__);
   13886 			break;
   13887 		}
   13888 		data[i] = word;
   13889 	}
   13890 
   13891 	sc->nvm.release(sc);
   13892 	return rv;
   13893 }
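
/*
 * Worked example of the offset arithmetic in wm_nvm_read_ich8(),
 * assuming a (hypothetical) 0x1000-word flash bank: with flash_bank 1,
 * NVM word 3 is read from byte offset 1 * (0x1000 * 2) + 3 * 2 =
 * 0x2006.
 */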
   13894 
   13895 /******************************************************************************
   13896  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13897  * register.
   13898  *
   13899  * sc - Struct containing variables accessed by shared code
   13900  * offset - offset of word in the EEPROM to read
   13901  * data - word read from the EEPROM
   13902  * words - number of words to read
   13903  *****************************************************************************/
   13904 static int
   13905 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13906 {
   13907 	int32_t	 rv = 0;
   13908 	uint32_t flash_bank = 0;
   13909 	uint32_t act_offset = 0;
   13910 	uint32_t bank_offset = 0;
   13911 	uint32_t dword = 0;
   13912 	uint16_t i = 0;
   13913 
   13914 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13915 		device_xname(sc->sc_dev), __func__));
   13916 
   13917 	if (sc->nvm.acquire(sc) != 0)
   13918 		return -1;
   13919 
   13920 	/*
   13921 	 * We need to know which is the valid flash bank.  In the event
   13922 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13923 	 * managing flash_bank. So it cannot be trusted and needs
   13924 	 * to be updated with each read.
   13925 	 */
   13926 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13927 	if (rv) {
   13928 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13929 			device_xname(sc->sc_dev)));
   13930 		flash_bank = 0;
   13931 	}
   13932 
   13933 	/*
   13934 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13935 	 * size
   13936 	 */
   13937 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13938 
   13939 	for (i = 0; i < words; i++) {
   13940 		/* The NVM part needs a byte offset, hence * 2 */
   13941 		act_offset = bank_offset + ((offset + i) * 2);
   13942 		/* but we must read dword aligned, so mask ... */
   13943 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13944 		if (rv) {
   13945 			aprint_error_dev(sc->sc_dev,
   13946 			    "%s: failed to read NVM\n", __func__);
   13947 			break;
   13948 		}
   13949 		/* ... and pick out low or high word */
   13950 		if ((act_offset & 0x2) == 0)
   13951 			data[i] = (uint16_t)(dword & 0xFFFF);
   13952 		else
   13953 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13954 	}
   13955 
   13956 	sc->nvm.release(sc);
   13957 	return rv;
   13958 }
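
/*
 * The alignment handling in wm_nvm_read_spt() in isolation: SPT flash
 * reads are 32-bit, so the 16-bit word at byte offset N is fetched by
 * reading the dword at N & ~0x3 and selecting the half indicated by
 * bit 1 of N.  A standalone sketch (hypothetical helper):
 */
#if 0
static uint16_t
wm_spt_pick_word(uint32_t dword, uint32_t byte_offset)
{

	if ((byte_offset & 0x2) == 0)
		return (uint16_t)(dword & 0xFFFF);
	return (uint16_t)((dword >> 16) & 0xFFFF);
}
#endif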
   13959 
   13960 /* iNVM */
   13961 
   13962 static int
   13963 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13964 {
	int32_t	 rv = -1;	/* Return "not found" unless we match */
   13966 	uint32_t invm_dword;
   13967 	uint16_t i;
   13968 	uint8_t record_type, word_address;
   13969 
   13970 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   13971 		device_xname(sc->sc_dev), __func__));
   13972 
   13973 	for (i = 0; i < INVM_SIZE; i++) {
   13974 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13975 		/* Get record type */
   13976 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13977 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13978 			break;
   13979 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13980 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13981 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13982 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13983 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13984 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13985 			if (word_address == address) {
   13986 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13987 				rv = 0;
   13988 				break;
   13989 			}
   13990 		}
   13991 	}
   13992 
   13993 	return rv;
   13994 }
   13995 
   13996 static int
   13997 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13998 {
   13999 	int rv = 0;
   14000 	int i;
   14001 
   14002 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14003 		device_xname(sc->sc_dev), __func__));
   14004 
   14005 	if (sc->nvm.acquire(sc) != 0)
   14006 		return -1;
   14007 
   14008 	for (i = 0; i < words; i++) {
   14009 		switch (offset + i) {
   14010 		case NVM_OFF_MACADDR:
   14011 		case NVM_OFF_MACADDR1:
   14012 		case NVM_OFF_MACADDR2:
   14013 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   14014 			if (rv != 0) {
   14015 				data[i] = 0xffff;
   14016 				rv = -1;
   14017 			}
   14018 			break;
   14019 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
   14020 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14021 			if (rv != 0) {
   14022 				*data = INVM_DEFAULT_AL;
   14023 				rv = 0;
   14024 			}
   14025 			break;
   14026 		case NVM_OFF_CFG2:
   14027 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14028 			if (rv != 0) {
   14029 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   14030 				rv = 0;
   14031 			}
   14032 			break;
   14033 		case NVM_OFF_CFG4:
   14034 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14035 			if (rv != 0) {
   14036 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   14037 				rv = 0;
   14038 			}
   14039 			break;
   14040 		case NVM_OFF_LED_1_CFG:
   14041 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14042 			if (rv != 0) {
   14043 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   14044 				rv = 0;
   14045 			}
   14046 			break;
   14047 		case NVM_OFF_LED_0_2_CFG:
   14048 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14049 			if (rv != 0) {
   14050 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   14051 				rv = 0;
   14052 			}
   14053 			break;
   14054 		case NVM_OFF_ID_LED_SETTINGS:
   14055 			rv = wm_nvm_read_word_invm(sc, offset, data);
   14056 			if (rv != 0) {
   14057 				*data = ID_LED_RESERVED_FFFF;
   14058 				rv = 0;
   14059 			}
   14060 			break;
   14061 		default:
   14062 			DPRINTF(sc, WM_DEBUG_NVM,
   14063 			    ("NVM word 0x%02x is not mapped.\n", offset));
   14064 			*data = NVM_RESERVED_WORD;
   14065 			break;
   14066 		}
   14067 	}
   14068 
   14069 	sc->nvm.release(sc);
   14070 	return rv;
   14071 }
   14072 
/* Lock, detect NVM type, validate checksum, get version and read */
   14074 
   14075 static int
   14076 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   14077 {
   14078 	uint32_t eecd = 0;
   14079 
   14080 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   14081 	    || sc->sc_type == WM_T_82583) {
   14082 		eecd = CSR_READ(sc, WMREG_EECD);
   14083 
   14084 		/* Isolate bits 15 & 16 */
   14085 		eecd = ((eecd >> 15) & 0x03);
   14086 
   14087 		/* If both bits are set, device is Flash type */
   14088 		if (eecd == 0x03)
   14089 			return 0;
   14090 	}
   14091 	return 1;
   14092 }
   14093 
   14094 static int
   14095 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   14096 {
   14097 	uint32_t eec;
   14098 
   14099 	eec = CSR_READ(sc, WMREG_EEC);
   14100 	if ((eec & EEC_FLASH_DETECTED) != 0)
   14101 		return 1;
   14102 
   14103 	return 0;
   14104 }
   14105 
   14106 /*
   14107  * wm_nvm_validate_checksum
   14108  *
   14109  * The checksum is defined as the sum of the first 64 (16 bit) words.
   14110  */
   14111 static int
   14112 wm_nvm_validate_checksum(struct wm_softc *sc)
   14113 {
   14114 	uint16_t checksum;
   14115 	uint16_t eeprom_data;
   14116 #ifdef WM_DEBUG
   14117 	uint16_t csum_wordaddr, valid_checksum;
   14118 #endif
   14119 	int i;
   14120 
   14121 	checksum = 0;
   14122 
   14123 	/* Don't check for I211 */
   14124 	if (sc->sc_type == WM_T_I211)
   14125 		return 0;
   14126 
   14127 #ifdef WM_DEBUG
   14128 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   14129 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   14130 		csum_wordaddr = NVM_OFF_COMPAT;
   14131 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   14132 	} else {
   14133 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   14134 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   14135 	}
   14136 
   14137 	/* Dump EEPROM image for debug */
   14138 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14139 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14140 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   14141 		/* XXX PCH_SPT? */
   14142 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   14143 		if ((eeprom_data & valid_checksum) == 0)
   14144 			DPRINTF(sc, WM_DEBUG_NVM,
			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   14146 				device_xname(sc->sc_dev), eeprom_data,
   14147 				    valid_checksum));
   14148 	}
   14149 
   14150 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
   14151 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   14152 		for (i = 0; i < NVM_SIZE; i++) {
   14153 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14154 				printf("XXXX ");
   14155 			else
   14156 				printf("%04hx ", eeprom_data);
   14157 			if (i % 8 == 7)
   14158 				printf("\n");
   14159 		}
   14160 	}
   14161 
   14162 #endif /* WM_DEBUG */
   14163 
   14164 	for (i = 0; i < NVM_SIZE; i++) {
   14165 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   14166 			return 1;
   14167 		checksum += eeprom_data;
   14168 	}
   14169 
   14170 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   14171 #ifdef WM_DEBUG
   14172 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   14173 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   14174 #endif
   14175 	}
   14176 
   14177 	return 0;
   14178 }
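
/*
 * The checksum rule above in isolation: the 16-bit sum of NVM words
 * 0x00-0x3F must equal NVM_CHECKSUM, so an image's checksum word is
 * chosen as NVM_CHECKSUM minus the sum of the other words.  A minimal
 * sketch over an in-memory image (hypothetical helper):
 */
#if 0
static bool
wm_nvm_image_sum_ok(const uint16_t *image, int nwords)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < nwords; i++)
		sum += image[i];
	return sum == (uint16_t)NVM_CHECKSUM;
}
#endif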
   14179 
   14180 static void
   14181 wm_nvm_version_invm(struct wm_softc *sc)
   14182 {
   14183 	uint32_t dword;
   14184 
   14185 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the datasheet
	 * describes.
   14188 	 * Perhaps it's not perfect though...
   14189 	 *
   14190 	 * Example:
   14191 	 *
   14192 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   14193 	 */
   14194 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   14195 	dword = __SHIFTOUT(dword, INVM_VER_1);
   14196 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   14197 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   14198 }
   14199 
   14200 static void
   14201 wm_nvm_version(struct wm_softc *sc)
   14202 {
   14203 	uint16_t major, minor, build, patch;
   14204 	uint16_t uid0, uid1;
   14205 	uint16_t nvm_data;
   14206 	uint16_t off;
   14207 	bool check_version = false;
   14208 	bool check_optionrom = false;
   14209 	bool have_build = false;
   14210 	bool have_uid = true;
   14211 
   14212 	/*
   14213 	 * Version format:
   14214 	 *
   14215 	 * XYYZ
   14216 	 * X0YZ
   14217 	 * X0YY
   14218 	 *
   14219 	 * Example:
   14220 	 *
   14221 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   14222 	 *	82571	0x50a6	5.10.6?
   14223 	 *	82572	0x506a	5.6.10?
   14224 	 *	82572EI	0x5069	5.6.9?
   14225 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   14226 	 *		0x2013	2.1.3?
   14227 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   14228 	 * ICH8+82567	0x0040	0.4.0?
   14229 	 * ICH9+82566	0x1040	1.4.0?
   14230 	 *ICH10+82567	0x0043	0.4.3?
   14231 	 *  PCH+82577	0x00c1	0.12.1?
   14232 	 * PCH2+82579	0x00d3	0.13.3?
   14233 	 *		0x00d4	0.13.4?
   14234 	 *  LPT+I218	0x0023	0.2.3?
   14235 	 *  SPT+I219	0x0084	0.8.4?
   14236 	 *  CNP+I219	0x0054	0.5.4?
   14237 	 */
   14238 
   14239 	/*
   14240 	 * XXX
   14241 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   14242 	 * I've never seen real 82574 hardware with such small SPI ROM.
   14243 	 */
   14244 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   14245 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   14246 		have_uid = false;
   14247 
   14248 	switch (sc->sc_type) {
   14249 	case WM_T_82571:
   14250 	case WM_T_82572:
   14251 	case WM_T_82574:
   14252 	case WM_T_82583:
   14253 		check_version = true;
   14254 		check_optionrom = true;
   14255 		have_build = true;
   14256 		break;
   14257 	case WM_T_ICH8:
   14258 	case WM_T_ICH9:
   14259 	case WM_T_ICH10:
   14260 	case WM_T_PCH:
   14261 	case WM_T_PCH2:
   14262 	case WM_T_PCH_LPT:
   14263 	case WM_T_PCH_SPT:
   14264 	case WM_T_PCH_CNP:
   14265 		check_version = true;
   14266 		have_build = true;
   14267 		have_uid = false;
   14268 		break;
   14269 	case WM_T_82575:
   14270 	case WM_T_82576:
   14271 	case WM_T_82580:
   14272 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   14273 			check_version = true;
   14274 		break;
   14275 	case WM_T_I211:
   14276 		wm_nvm_version_invm(sc);
   14277 		have_uid = false;
   14278 		goto printver;
   14279 	case WM_T_I210:
   14280 		if (!wm_nvm_flash_presence_i210(sc)) {
   14281 			wm_nvm_version_invm(sc);
   14282 			have_uid = false;
   14283 			goto printver;
   14284 		}
   14285 		/* FALLTHROUGH */
   14286 	case WM_T_I350:
   14287 	case WM_T_I354:
   14288 		check_version = true;
   14289 		check_optionrom = true;
   14290 		break;
   14291 	default:
   14292 		return;
   14293 	}
   14294 	if (check_version
   14295 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   14296 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   14297 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   14298 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   14299 			build = nvm_data & NVM_BUILD_MASK;
   14300 			have_build = true;
   14301 		} else
   14302 			minor = nvm_data & 0x00ff;
   14303 
		/* Convert the hex-coded minor number to decimal */
   14305 		minor = (minor / 16) * 10 + (minor % 16);
   14306 		sc->sc_nvm_ver_major = major;
   14307 		sc->sc_nvm_ver_minor = minor;
   14308 
   14309 printver:
   14310 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   14311 		    sc->sc_nvm_ver_minor);
   14312 		if (have_build) {
   14313 			sc->sc_nvm_ver_build = build;
   14314 			aprint_verbose(".%d", build);
   14315 		}
   14316 	}
   14317 
	/* Assume the Option ROM area is above NVM_SIZE */
   14319 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   14320 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   14321 		/* Option ROM Version */
   14322 		if ((off != 0x0000) && (off != 0xffff)) {
   14323 			int rv;
   14324 
   14325 			off += NVM_COMBO_VER_OFF;
   14326 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   14327 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   14328 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   14329 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   14330 				/* 16bits */
   14331 				major = uid0 >> 8;
   14332 				build = (uid0 << 8) | (uid1 >> 8);
   14333 				patch = uid1 & 0x00ff;
   14334 				aprint_verbose(", option ROM Version %d.%d.%d",
   14335 				    major, build, patch);
   14336 			}
   14337 		}
   14338 	}
   14339 
   14340 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   14341 		aprint_verbose(", Image Unique ID %08x",
   14342 		    ((uint32_t)uid1 << 16) | uid0);
   14343 }
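
/*
 * Worked example of the version decoding above, with field positions
 * inferred from the table in the comment (the shifts and masks below
 * are assumptions for illustration, not the wmreg.h definitions):
 * 0x2013 splits into major 2, minor 0x01 -> 1 and build 3, i.e.
 * "2.1.3", matching the 82574L entry.  The minor number is hex-coded
 * decimal, so e.g. 0x50a2 yields minor 0x0a -> 10 and version 5.10.2.
 */
#if 0
static void
wm_decode_nvm_version(uint16_t w, int *maj, int *min, int *bld)
{
	int m;

	*maj = (w >> 12) & 0xf;		 /* Assumed major field */
	m = (w >> 4) & 0xff;		 /* Assumed minor field */
	*min = (m / 16) * 10 + (m % 16); /* Hex-coded decimal -> decimal */
	*bld = w & 0xf;			 /* Assumed build field */
}
#endif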
   14344 
   14345 /*
   14346  * wm_nvm_read:
   14347  *
   14348  *	Read data from the serial EEPROM.
   14349  */
   14350 static int
   14351 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   14352 {
   14353 	int rv;
   14354 
   14355 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
   14356 		device_xname(sc->sc_dev), __func__));
   14357 
   14358 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   14359 		return -1;
   14360 
   14361 	rv = sc->nvm.read(sc, word, wordcnt, data);
   14362 
   14363 	return rv;
   14364 }
   14365 
   14366 /*
   14367  * Hardware semaphores.
 * Very complex...
   14369  */
   14370 
   14371 static int
   14372 wm_get_null(struct wm_softc *sc)
   14373 {
   14374 
   14375 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14376 		device_xname(sc->sc_dev), __func__));
   14377 	return 0;
   14378 }
   14379 
   14380 static void
   14381 wm_put_null(struct wm_softc *sc)
   14382 {
   14383 
   14384 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14385 		device_xname(sc->sc_dev), __func__));
   14386 	return;
   14387 }
   14388 
   14389 static int
   14390 wm_get_eecd(struct wm_softc *sc)
   14391 {
   14392 	uint32_t reg;
   14393 	int x;
   14394 
   14395 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14396 		device_xname(sc->sc_dev), __func__));
   14397 
   14398 	reg = CSR_READ(sc, WMREG_EECD);
   14399 
   14400 	/* Request EEPROM access. */
   14401 	reg |= EECD_EE_REQ;
   14402 	CSR_WRITE(sc, WMREG_EECD, reg);
   14403 
	/* ... and wait for it to be granted. */
   14405 	for (x = 0; x < 1000; x++) {
   14406 		reg = CSR_READ(sc, WMREG_EECD);
   14407 		if (reg & EECD_EE_GNT)
   14408 			break;
   14409 		delay(5);
   14410 	}
   14411 	if ((reg & EECD_EE_GNT) == 0) {
   14412 		aprint_error_dev(sc->sc_dev,
   14413 		    "could not acquire EEPROM GNT\n");
   14414 		reg &= ~EECD_EE_REQ;
   14415 		CSR_WRITE(sc, WMREG_EECD, reg);
   14416 		return -1;
   14417 	}
   14418 
   14419 	return 0;
   14420 }
   14421 
   14422 static void
   14423 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   14424 {
   14425 
   14426 	*eecd |= EECD_SK;
   14427 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14428 	CSR_WRITE_FLUSH(sc);
   14429 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14430 		delay(1);
   14431 	else
   14432 		delay(50);
   14433 }
   14434 
   14435 static void
   14436 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   14437 {
   14438 
   14439 	*eecd &= ~EECD_SK;
   14440 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   14441 	CSR_WRITE_FLUSH(sc);
   14442 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   14443 		delay(1);
   14444 	else
   14445 		delay(50);
   14446 }
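          
          /*
           * Illustrative sketch, not compiled: the two helpers above form the
           * halves of a bit-banged serial clock.  A hypothetical shift-out
           * routine would present each data bit on EECD_DI and pulse SK
           * around it:
           */
          #if 0
          static void
          wm_nvm_shiftout_example(struct wm_softc *sc, uint16_t val, int nbits)
          {
          	uint32_t eecd = CSR_READ(sc, WMREG_EECD);
          	int i;
          
          	for (i = nbits - 1; i >= 0; i--) {
          		if (val & (1U << i))
          			eecd |= EECD_DI;	/* present a 1 bit */
          		else
          			eecd &= ~EECD_DI;	/* present a 0 bit */
          		CSR_WRITE(sc, WMREG_EECD, eecd);
          		CSR_WRITE_FLUSH(sc);
          		wm_nvm_eec_clock_raise(sc, &eecd);	/* device samples DI */
          		wm_nvm_eec_clock_lower(sc, &eecd);
          	}
          }
          #endif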
   14447 
   14448 static void
   14449 wm_put_eecd(struct wm_softc *sc)
   14450 {
   14451 	uint32_t reg;
   14452 
   14453 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14454 		device_xname(sc->sc_dev), __func__));
   14455 
   14456 	/* Stop nvm */
   14457 	reg = CSR_READ(sc, WMREG_EECD);
   14458 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14459 		/* Pull CS high */
   14460 		reg |= EECD_CS;
   14461 		wm_nvm_eec_clock_lower(sc, &reg);
   14462 	} else {
   14463 		/* CS on Microwire is active-high */
   14464 		reg &= ~(EECD_CS | EECD_DI);
   14465 		CSR_WRITE(sc, WMREG_EECD, reg);
   14466 		wm_nvm_eec_clock_raise(sc, &reg);
   14467 		wm_nvm_eec_clock_lower(sc, &reg);
   14468 	}
   14469 
   14470 	reg = CSR_READ(sc, WMREG_EECD);
   14471 	reg &= ~EECD_EE_REQ;
   14472 	CSR_WRITE(sc, WMREG_EECD, reg);
   14473 
   14474 	return;
   14475 }
   14476 
   14477 /*
   14478  * Get hardware semaphore.
   14479  * Same as e1000_get_hw_semaphore_generic()
   14480  */
   14481 static int
   14482 wm_get_swsm_semaphore(struct wm_softc *sc)
   14483 {
   14484 	int32_t timeout;
   14485 	uint32_t swsm;
   14486 
   14487 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14488 		device_xname(sc->sc_dev), __func__));
   14489 	KASSERT(sc->sc_nvm_wordsize > 0);
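          
          	/*
          	 * Two-stage handshake: SMBI is the host-side lock and SWESMBI
          	 * arbitrates between software and firmware; take both, in that
          	 * order.
          	 */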
   14490 
   14491 retry:
   14492 	/* Get the SW semaphore. */
   14493 	timeout = sc->sc_nvm_wordsize + 1;
   14494 	while (timeout) {
   14495 		swsm = CSR_READ(sc, WMREG_SWSM);
   14496 
   14497 		if ((swsm & SWSM_SMBI) == 0)
   14498 			break;
   14499 
   14500 		delay(50);
   14501 		timeout--;
   14502 	}
   14503 
   14504 	if (timeout == 0) {
   14505 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14506 			/*
   14507 			 * In rare circumstances, the SW semaphore may already
   14508 			 * be held unintentionally. Clear the semaphore once
   14509 			 * before giving up.
   14510 			 */
   14511 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14512 			wm_put_swsm_semaphore(sc);
   14513 			goto retry;
   14514 		}
   14515 		aprint_error_dev(sc->sc_dev,
   14516 		    "could not acquire SWSM SMBI\n");
   14517 		return 1;
   14518 	}
   14519 
   14520 	/* Get the FW semaphore. */
   14521 	timeout = sc->sc_nvm_wordsize + 1;
   14522 	while (timeout) {
   14523 		swsm = CSR_READ(sc, WMREG_SWSM);
   14524 		swsm |= SWSM_SWESMBI;
   14525 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14526 		/* If we managed to set the bit we got the semaphore. */
   14527 		swsm = CSR_READ(sc, WMREG_SWSM);
   14528 		if (swsm & SWSM_SWESMBI)
   14529 			break;
   14530 
   14531 		delay(50);
   14532 		timeout--;
   14533 	}
   14534 
   14535 	if (timeout == 0) {
   14536 		aprint_error_dev(sc->sc_dev,
   14537 		    "could not acquire SWSM SWESMBI\n");
   14538 		/* Release semaphores */
   14539 		wm_put_swsm_semaphore(sc);
   14540 		return 1;
   14541 	}
   14542 	return 0;
   14543 }
   14544 
   14545 /*
   14546  * Put hardware semaphore.
   14547  * Same as e1000_put_hw_semaphore_generic()
   14548  */
   14549 static void
   14550 wm_put_swsm_semaphore(struct wm_softc *sc)
   14551 {
   14552 	uint32_t swsm;
   14553 
   14554 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14555 		device_xname(sc->sc_dev), __func__));
   14556 
   14557 	swsm = CSR_READ(sc, WMREG_SWSM);
   14558 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14559 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14560 }
   14561 
   14562 /*
   14563  * Get SW/FW semaphore.
   14564  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14565  */
   14566 static int
   14567 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14568 {
   14569 	uint32_t swfw_sync;
   14570 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14571 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14572 	int timeout;
   14573 
   14574 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14575 		device_xname(sc->sc_dev), __func__));
   14576 
   14577 	if (sc->sc_type == WM_T_80003)
   14578 		timeout = 50;
   14579 	else
   14580 		timeout = 200;
   14581 
   14582 	while (timeout) {
   14583 		if (wm_get_swsm_semaphore(sc)) {
   14584 			aprint_error_dev(sc->sc_dev,
   14585 			    "%s: failed to get semaphore\n",
   14586 			    __func__);
   14587 			return 1;
   14588 		}
   14589 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14590 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14591 			swfw_sync |= swmask;
   14592 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14593 			wm_put_swsm_semaphore(sc);
   14594 			return 0;
   14595 		}
   14596 		wm_put_swsm_semaphore(sc);
   14597 		delay(5000);
   14598 		timeout--;
   14599 	}
   14600 	device_printf(sc->sc_dev,
   14601 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14602 	    mask, swfw_sync);
   14603 	return 1;
   14604 }
   14605 
   14606 static void
   14607 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14608 {
   14609 	uint32_t swfw_sync;
   14610 
   14611 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14612 		device_xname(sc->sc_dev), __func__));
   14613 
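          	/* SW_FW_SYNC may only be modified while holding the SWSM semaphore */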
   14614 	while (wm_get_swsm_semaphore(sc) != 0)
   14615 		continue;
   14616 
   14617 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14618 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14619 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14620 
   14621 	wm_put_swsm_semaphore(sc);
   14622 }
   14623 
   14624 static int
   14625 wm_get_nvm_80003(struct wm_softc *sc)
   14626 {
   14627 	int rv;
   14628 
   14629 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14630 		device_xname(sc->sc_dev), __func__));
   14631 
   14632 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14633 		aprint_error_dev(sc->sc_dev,
   14634 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14635 		return rv;
   14636 	}
   14637 
   14638 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14639 	    && (rv = wm_get_eecd(sc)) != 0) {
   14640 		aprint_error_dev(sc->sc_dev,
   14641 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14642 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14643 		return rv;
   14644 	}
   14645 
   14646 	return 0;
   14647 }
   14648 
   14649 static void
   14650 wm_put_nvm_80003(struct wm_softc *sc)
   14651 {
   14652 
   14653 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14654 		device_xname(sc->sc_dev), __func__));
   14655 
   14656 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14657 		wm_put_eecd(sc);
   14658 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14659 }
   14660 
   14661 static int
   14662 wm_get_nvm_82571(struct wm_softc *sc)
   14663 {
   14664 	int rv;
   14665 
   14666 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14667 		device_xname(sc->sc_dev), __func__));
   14668 
   14669 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14670 		return rv;
   14671 
   14672 	switch (sc->sc_type) {
   14673 	case WM_T_82573:
   14674 		break;
   14675 	default:
   14676 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14677 			rv = wm_get_eecd(sc);
   14678 		break;
   14679 	}
   14680 
   14681 	if (rv != 0) {
   14682 		aprint_error_dev(sc->sc_dev,
   14683 		    "%s: failed to get semaphore\n",
   14684 		    __func__);
   14685 		wm_put_swsm_semaphore(sc);
   14686 	}
   14687 
   14688 	return rv;
   14689 }
   14690 
   14691 static void
   14692 wm_put_nvm_82571(struct wm_softc *sc)
   14693 {
   14694 
   14695 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14696 		device_xname(sc->sc_dev), __func__));
   14697 
   14698 	switch (sc->sc_type) {
   14699 	case WM_T_82573:
   14700 		break;
   14701 	default:
   14702 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14703 			wm_put_eecd(sc);
   14704 		break;
   14705 	}
   14706 
   14707 	wm_put_swsm_semaphore(sc);
   14708 }
   14709 
   14710 static int
   14711 wm_get_phy_82575(struct wm_softc *sc)
   14712 {
   14713 
   14714 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14715 		device_xname(sc->sc_dev), __func__));
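          	/* Each PCI function arbitrates its own PHY semaphore bit */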
   14716 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14717 }
   14718 
   14719 static void
   14720 wm_put_phy_82575(struct wm_softc *sc)
   14721 {
   14722 
   14723 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14724 		device_xname(sc->sc_dev), __func__));
   14725 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14726 }
   14727 
   14728 static int
   14729 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14730 {
   14731 	uint32_t ext_ctrl;
    14732 	int timeout;
   14733 
   14734 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14735 		device_xname(sc->sc_dev), __func__));
   14736 
   14737 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14738 	for (timeout = 0; timeout < 200; timeout++) {
   14739 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14740 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14741 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14742 
   14743 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14744 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14745 			return 0;
   14746 		delay(5000);
   14747 	}
   14748 	device_printf(sc->sc_dev,
   14749 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14750 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14751 	return 1;
   14752 }
   14753 
   14754 static void
   14755 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14756 {
   14757 	uint32_t ext_ctrl;
   14758 
   14759 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14760 		device_xname(sc->sc_dev), __func__));
   14761 
   14762 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14763 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14764 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14765 
   14766 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14767 }
   14768 
   14769 static int
   14770 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14771 {
   14772 	uint32_t ext_ctrl;
   14773 	int timeout;
   14774 
   14775 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14776 		device_xname(sc->sc_dev), __func__));
   14777 	mutex_enter(sc->sc_ich_phymtx);
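          	/* First wait for any current owner to release MDIO ownership */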
   14778 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14779 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14780 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14781 			break;
   14782 		delay(1000);
   14783 	}
   14784 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14785 		device_printf(sc->sc_dev,
   14786 		    "SW has already locked the resource\n");
   14787 		goto out;
   14788 	}
   14789 
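          	/* Now claim ownership and read back to verify the bit stuck */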
   14790 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14791 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14792 	for (timeout = 0; timeout < 1000; timeout++) {
   14793 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14794 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14795 			break;
   14796 		delay(1000);
   14797 	}
   14798 	if (timeout >= 1000) {
   14799 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14800 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14801 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14802 		goto out;
   14803 	}
   14804 	return 0;
   14805 
   14806 out:
   14807 	mutex_exit(sc->sc_ich_phymtx);
   14808 	return 1;
   14809 }
   14810 
   14811 static void
   14812 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14813 {
   14814 	uint32_t ext_ctrl;
   14815 
   14816 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14817 		device_xname(sc->sc_dev), __func__));
   14818 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14819 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14820 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14821 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14822 	} else {
   14823 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14824 	}
   14825 
   14826 	mutex_exit(sc->sc_ich_phymtx);
   14827 }
   14828 
   14829 static int
   14830 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14831 {
   14832 
   14833 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14834 		device_xname(sc->sc_dev), __func__));
   14835 	mutex_enter(sc->sc_ich_nvmmtx);
   14836 
   14837 	return 0;
   14838 }
   14839 
   14840 static void
   14841 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14842 {
   14843 
   14844 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14845 		device_xname(sc->sc_dev), __func__));
   14846 	mutex_exit(sc->sc_ich_nvmmtx);
   14847 }
   14848 
   14849 static int
   14850 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14851 {
   14852 	int i = 0;
   14853 	uint32_t reg;
   14854 
   14855 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14856 		device_xname(sc->sc_dev), __func__));
   14857 
   14858 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
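          	/* Set the SW ownership bit; reading it back tells whether we won */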
   14859 	do {
   14860 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14861 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14862 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14863 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14864 			break;
   14865 		delay(2*1000);
   14866 		i++;
   14867 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14868 
   14869 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14870 		wm_put_hw_semaphore_82573(sc);
   14871 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14872 		    device_xname(sc->sc_dev));
   14873 		return -1;
   14874 	}
   14875 
   14876 	return 0;
   14877 }
   14878 
   14879 static void
   14880 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14881 {
   14882 	uint32_t reg;
   14883 
   14884 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   14885 		device_xname(sc->sc_dev), __func__));
   14886 
   14887 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14888 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14889 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14890 }
   14891 
   14892 /*
   14893  * Management mode and power management related subroutines.
   14894  * BMC, AMT, suspend/resume and EEE.
   14895  */
   14896 
   14897 #ifdef WM_WOL
   14898 static int
   14899 wm_check_mng_mode(struct wm_softc *sc)
   14900 {
   14901 	int rv;
   14902 
   14903 	switch (sc->sc_type) {
   14904 	case WM_T_ICH8:
   14905 	case WM_T_ICH9:
   14906 	case WM_T_ICH10:
   14907 	case WM_T_PCH:
   14908 	case WM_T_PCH2:
   14909 	case WM_T_PCH_LPT:
   14910 	case WM_T_PCH_SPT:
   14911 	case WM_T_PCH_CNP:
   14912 		rv = wm_check_mng_mode_ich8lan(sc);
   14913 		break;
   14914 	case WM_T_82574:
   14915 	case WM_T_82583:
   14916 		rv = wm_check_mng_mode_82574(sc);
   14917 		break;
   14918 	case WM_T_82571:
   14919 	case WM_T_82572:
   14920 	case WM_T_82573:
   14921 	case WM_T_80003:
   14922 		rv = wm_check_mng_mode_generic(sc);
   14923 		break;
   14924 	default:
    14925 		/* Nothing to do */
   14926 		rv = 0;
   14927 		break;
   14928 	}
   14929 
   14930 	return rv;
   14931 }
   14932 
   14933 static int
   14934 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14935 {
   14936 	uint32_t fwsm;
   14937 
   14938 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14939 
   14940 	if (((fwsm & FWSM_FW_VALID) != 0)
   14941 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14942 		return 1;
   14943 
   14944 	return 0;
   14945 }
   14946 
   14947 static int
   14948 wm_check_mng_mode_82574(struct wm_softc *sc)
   14949 {
   14950 	uint16_t data;
   14951 
   14952 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14953 
   14954 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14955 		return 1;
   14956 
   14957 	return 0;
   14958 }
   14959 
   14960 static int
   14961 wm_check_mng_mode_generic(struct wm_softc *sc)
   14962 {
   14963 	uint32_t fwsm;
   14964 
   14965 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14966 
   14967 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14968 		return 1;
   14969 
   14970 	return 0;
   14971 }
   14972 #endif /* WM_WOL */
   14973 
   14974 static int
   14975 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14976 {
   14977 	uint32_t manc, fwsm, factps;
   14978 
   14979 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14980 		return 0;
   14981 
   14982 	manc = CSR_READ(sc, WMREG_MANC);
   14983 
   14984 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14985 		device_xname(sc->sc_dev), manc));
   14986 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14987 		return 0;
   14988 
   14989 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14990 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14991 		factps = CSR_READ(sc, WMREG_FACTPS);
   14992 		if (((factps & FACTPS_MNGCG) == 0)
   14993 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14994 			return 1;
    14995 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14996 		uint16_t data;
   14997 
   14998 		factps = CSR_READ(sc, WMREG_FACTPS);
   14999 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   15000 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   15001 			device_xname(sc->sc_dev), factps, data));
   15002 		if (((factps & FACTPS_MNGCG) == 0)
   15003 		    && ((data & NVM_CFG2_MNGM_MASK)
   15004 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   15005 			return 1;
   15006 	} else if (((manc & MANC_SMBUS_EN) != 0)
   15007 	    && ((manc & MANC_ASF_EN) == 0))
   15008 		return 1;
   15009 
   15010 	return 0;
   15011 }
   15012 
   15013 static bool
   15014 wm_phy_resetisblocked(struct wm_softc *sc)
   15015 {
   15016 	bool blocked = false;
   15017 	uint32_t reg;
   15018 	int i = 0;
   15019 
   15020 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15021 		device_xname(sc->sc_dev), __func__));
   15022 
   15023 	switch (sc->sc_type) {
   15024 	case WM_T_ICH8:
   15025 	case WM_T_ICH9:
   15026 	case WM_T_ICH10:
   15027 	case WM_T_PCH:
   15028 	case WM_T_PCH2:
   15029 	case WM_T_PCH_LPT:
   15030 	case WM_T_PCH_SPT:
   15031 	case WM_T_PCH_CNP:
   15032 		do {
   15033 			reg = CSR_READ(sc, WMREG_FWSM);
   15034 			if ((reg & FWSM_RSPCIPHY) == 0) {
   15035 				blocked = true;
   15036 				delay(10*1000);
   15037 				continue;
   15038 			}
   15039 			blocked = false;
   15040 		} while (blocked && (i++ < 30));
    15041 		return blocked;
   15043 	case WM_T_82571:
   15044 	case WM_T_82572:
   15045 	case WM_T_82573:
   15046 	case WM_T_82574:
   15047 	case WM_T_82583:
   15048 	case WM_T_80003:
    15049 		reg = CSR_READ(sc, WMREG_MANC);
    15050 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   15055 	default:
   15056 		/* No problem */
   15057 		break;
   15058 	}
   15059 
   15060 	return false;
   15061 }
   15062 
   15063 static void
   15064 wm_get_hw_control(struct wm_softc *sc)
   15065 {
   15066 	uint32_t reg;
   15067 
   15068 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15069 		device_xname(sc->sc_dev), __func__));
   15070 
   15071 	if (sc->sc_type == WM_T_82573) {
   15072 		reg = CSR_READ(sc, WMREG_SWSM);
   15073 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   15074 	} else if (sc->sc_type >= WM_T_82571) {
   15075 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15076 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   15077 	}
   15078 }
   15079 
   15080 static void
   15081 wm_release_hw_control(struct wm_softc *sc)
   15082 {
   15083 	uint32_t reg;
   15084 
   15085 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
   15086 		device_xname(sc->sc_dev), __func__));
   15087 
   15088 	if (sc->sc_type == WM_T_82573) {
   15089 		reg = CSR_READ(sc, WMREG_SWSM);
   15090 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   15091 	} else if (sc->sc_type >= WM_T_82571) {
   15092 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15093 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   15094 	}
   15095 }
   15096 
   15097 static void
   15098 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   15099 {
   15100 	uint32_t reg;
   15101 
   15102 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15103 		device_xname(sc->sc_dev), __func__));
   15104 
   15105 	if (sc->sc_type < WM_T_PCH2)
   15106 		return;
   15107 
   15108 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   15109 
   15110 	if (gate)
   15111 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   15112 	else
   15113 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   15114 
   15115 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   15116 }
   15117 
   15118 static int
   15119 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   15120 {
   15121 	uint32_t fwsm, reg;
   15122 	int rv = 0;
   15123 
   15124 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15125 		device_xname(sc->sc_dev), __func__));
   15126 
   15127 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   15128 	wm_gate_hw_phy_config_ich8lan(sc, true);
   15129 
   15130 	/* Disable ULP */
   15131 	wm_ulp_disable(sc);
   15132 
   15133 	/* Acquire PHY semaphore */
   15134 	rv = sc->phy.acquire(sc);
   15135 	if (rv != 0) {
   15136 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15137 		device_xname(sc->sc_dev), __func__));
   15138 		return -1;
   15139 	}
   15140 
   15141 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   15142 	 * inaccessible and resetting the PHY is not blocked, toggle the
   15143 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   15144 	 */
   15145 	fwsm = CSR_READ(sc, WMREG_FWSM);
   15146 	switch (sc->sc_type) {
   15147 	case WM_T_PCH_LPT:
   15148 	case WM_T_PCH_SPT:
   15149 	case WM_T_PCH_CNP:
   15150 		if (wm_phy_is_accessible_pchlan(sc))
   15151 			break;
   15152 
   15153 		/* Before toggling LANPHYPC, see if PHY is accessible by
   15154 		 * forcing MAC to SMBus mode first.
   15155 		 */
   15156 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15157 		reg |= CTRL_EXT_FORCE_SMBUS;
   15158 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15159 #if 0
   15160 		/* XXX Isn't this required??? */
   15161 		CSR_WRITE_FLUSH(sc);
   15162 #endif
   15163 		/* Wait 50 milliseconds for MAC to finish any retries
   15164 		 * that it might be trying to perform from previous
   15165 		 * attempts to acknowledge any phy read requests.
   15166 		 */
   15167 		delay(50 * 1000);
   15168 		/* FALLTHROUGH */
   15169 	case WM_T_PCH2:
   15170 		if (wm_phy_is_accessible_pchlan(sc) == true)
   15171 			break;
   15172 		/* FALLTHROUGH */
   15173 	case WM_T_PCH:
   15174 		if (sc->sc_type == WM_T_PCH)
   15175 			if ((fwsm & FWSM_FW_VALID) != 0)
   15176 				break;
   15177 
   15178 		if (wm_phy_resetisblocked(sc) == true) {
   15179 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   15180 			break;
   15181 		}
   15182 
   15183 		/* Toggle LANPHYPC Value bit */
   15184 		wm_toggle_lanphypc_pch_lpt(sc);
   15185 
   15186 		if (sc->sc_type >= WM_T_PCH_LPT) {
   15187 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15188 				break;
   15189 
   15190 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   15191 			 * so ensure that the MAC is also out of SMBus mode
   15192 			 */
   15193 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15194 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15195 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15196 
   15197 			if (wm_phy_is_accessible_pchlan(sc) == true)
   15198 				break;
   15199 			rv = -1;
   15200 		}
   15201 		break;
   15202 	default:
   15203 		break;
   15204 	}
   15205 
   15206 	/* Release semaphore */
   15207 	sc->phy.release(sc);
   15208 
   15209 	if (rv == 0) {
   15210 		/* Check to see if able to reset PHY.  Print error if not */
   15211 		if (wm_phy_resetisblocked(sc)) {
   15212 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15213 			goto out;
   15214 		}
   15215 
   15216 		/* Reset the PHY before any access to it.  Doing so, ensures
   15217 		 * that the PHY is in a known good state before we read/write
   15218 		 * PHY registers.  The generic reset is sufficient here,
   15219 		 * because we haven't determined the PHY type yet.
   15220 		 */
   15221 		if (wm_reset_phy(sc) != 0)
   15222 			goto out;
   15223 
   15224 		/* On a successful reset, possibly need to wait for the PHY
   15225 		 * to quiesce to an accessible state before returning control
   15226 		 * to the calling function.  If the PHY does not quiesce, then
   15227 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
   15228 		 *  the PHY is in.
   15229 		 */
   15230 		if (wm_phy_resetisblocked(sc))
   15231 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   15232 	}
   15233 
   15234 out:
   15235 	/* Ungate automatic PHY configuration on non-managed 82579 */
   15236 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   15237 		delay(10*1000);
   15238 		wm_gate_hw_phy_config_ich8lan(sc, false);
   15239 	}
   15240 
    15241 	return rv;
   15242 }
   15243 
   15244 static void
   15245 wm_init_manageability(struct wm_softc *sc)
   15246 {
   15247 
   15248 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15249 		device_xname(sc->sc_dev), __func__));
   15250 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15251 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   15252 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15253 
   15254 		/* Disable hardware interception of ARP */
   15255 		manc &= ~MANC_ARP_EN;
   15256 
   15257 		/* Enable receiving management packets to the host */
   15258 		if (sc->sc_type >= WM_T_82571) {
   15259 			manc |= MANC_EN_MNG2HOST;
   15260 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   15261 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   15262 		}
   15263 
   15264 		CSR_WRITE(sc, WMREG_MANC, manc);
   15265 	}
   15266 }
   15267 
   15268 static void
   15269 wm_release_manageability(struct wm_softc *sc)
   15270 {
   15271 
   15272 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   15273 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   15274 
   15275 		manc |= MANC_ARP_EN;
   15276 		if (sc->sc_type >= WM_T_82571)
   15277 			manc &= ~MANC_EN_MNG2HOST;
   15278 
   15279 		CSR_WRITE(sc, WMREG_MANC, manc);
   15280 	}
   15281 }
   15282 
   15283 static void
   15284 wm_get_wakeup(struct wm_softc *sc)
   15285 {
   15286 
   15287 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   15288 	switch (sc->sc_type) {
   15289 	case WM_T_82573:
   15290 	case WM_T_82583:
   15291 		sc->sc_flags |= WM_F_HAS_AMT;
   15292 		/* FALLTHROUGH */
   15293 	case WM_T_80003:
   15294 	case WM_T_82575:
   15295 	case WM_T_82576:
   15296 	case WM_T_82580:
   15297 	case WM_T_I350:
   15298 	case WM_T_I354:
   15299 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   15300 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   15301 		/* FALLTHROUGH */
   15302 	case WM_T_82541:
   15303 	case WM_T_82541_2:
   15304 	case WM_T_82547:
   15305 	case WM_T_82547_2:
   15306 	case WM_T_82571:
   15307 	case WM_T_82572:
   15308 	case WM_T_82574:
   15309 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15310 		break;
   15311 	case WM_T_ICH8:
   15312 	case WM_T_ICH9:
   15313 	case WM_T_ICH10:
   15314 	case WM_T_PCH:
   15315 	case WM_T_PCH2:
   15316 	case WM_T_PCH_LPT:
   15317 	case WM_T_PCH_SPT:
   15318 	case WM_T_PCH_CNP:
   15319 		sc->sc_flags |= WM_F_HAS_AMT;
   15320 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   15321 		break;
   15322 	default:
   15323 		break;
   15324 	}
   15325 
   15326 	/* 1: HAS_MANAGE */
   15327 	if (wm_enable_mng_pass_thru(sc) != 0)
   15328 		sc->sc_flags |= WM_F_HAS_MANAGE;
   15329 
   15330 	/*
    15331 	 * Note that the WOL flag is set after the EEPROM-related reset
    15332 	 * code has run.
   15333 	 */
   15334 }
   15335 
   15336 /*
   15337  * Unconfigure Ultra Low Power mode.
   15338  * Only for I217 and newer (see below).
   15339  */
   15340 static int
   15341 wm_ulp_disable(struct wm_softc *sc)
   15342 {
   15343 	uint32_t reg;
   15344 	uint16_t phyreg;
   15345 	int i = 0, rv = 0;
   15346 
   15347 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15348 		device_xname(sc->sc_dev), __func__));
   15349 	/* Exclude old devices */
   15350 	if ((sc->sc_type < WM_T_PCH_LPT)
   15351 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   15352 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   15353 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   15354 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   15355 		return 0;
   15356 
   15357 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   15358 		/* Request ME un-configure ULP mode in the PHY */
   15359 		reg = CSR_READ(sc, WMREG_H2ME);
   15360 		reg &= ~H2ME_ULP;
   15361 		reg |= H2ME_ENFORCE_SETTINGS;
   15362 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15363 
   15364 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   15365 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   15366 			if (i++ == 30) {
   15367 				device_printf(sc->sc_dev, "%s timed out\n",
   15368 				    __func__);
   15369 				return -1;
   15370 			}
   15371 			delay(10 * 1000);
   15372 		}
   15373 		reg = CSR_READ(sc, WMREG_H2ME);
   15374 		reg &= ~H2ME_ENFORCE_SETTINGS;
   15375 		CSR_WRITE(sc, WMREG_H2ME, reg);
   15376 
   15377 		return 0;
   15378 	}
   15379 
   15380 	/* Acquire semaphore */
   15381 	rv = sc->phy.acquire(sc);
   15382 	if (rv != 0) {
   15383 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
   15384 		device_xname(sc->sc_dev), __func__));
   15385 		return -1;
   15386 	}
   15387 
   15388 	/* Toggle LANPHYPC */
   15389 	wm_toggle_lanphypc_pch_lpt(sc);
   15390 
   15391 	/* Unforce SMBus mode in PHY */
   15392 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   15393 	if (rv != 0) {
   15394 		uint32_t reg2;
   15395 
   15396 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   15397 			__func__);
   15398 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   15399 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   15400 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   15401 		delay(50 * 1000);
   15402 
   15403 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   15404 		    &phyreg);
   15405 		if (rv != 0)
   15406 			goto release;
   15407 	}
   15408 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15409 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   15410 
   15411 	/* Unforce SMBus mode in MAC */
   15412 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15413 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   15414 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15415 
   15416 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   15417 	if (rv != 0)
   15418 		goto release;
   15419 	phyreg |= HV_PM_CTRL_K1_ENA;
   15420 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   15421 
   15422 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   15423 		&phyreg);
   15424 	if (rv != 0)
   15425 		goto release;
   15426 	phyreg &= ~(I218_ULP_CONFIG1_IND
   15427 	    | I218_ULP_CONFIG1_STICKY_ULP
   15428 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   15429 	    | I218_ULP_CONFIG1_WOL_HOST
   15430 	    | I218_ULP_CONFIG1_INBAND_EXIT
   15431 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   15432 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   15433 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   15434 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15435 	phyreg |= I218_ULP_CONFIG1_START;
   15436 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   15437 
   15438 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15439 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   15440 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15441 
   15442 release:
   15443 	/* Release semaphore */
   15444 	sc->phy.release(sc);
   15445 	wm_gmii_reset(sc);
   15446 	delay(50 * 1000);
   15447 
   15448 	return rv;
   15449 }
   15450 
   15451 /* WOL in the newer chipset interfaces (pchlan) */
   15452 static int
   15453 wm_enable_phy_wakeup(struct wm_softc *sc)
   15454 {
   15455 	device_t dev = sc->sc_dev;
   15456 	uint32_t mreg, moff;
   15457 	uint16_t wuce, wuc, wufc, preg;
   15458 	int i, rv;
   15459 
   15460 	KASSERT(sc->sc_type >= WM_T_PCH);
   15461 
   15462 	/* Copy MAC RARs to PHY RARs */
   15463 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15464 
   15465 	/* Activate PHY wakeup */
   15466 	rv = sc->phy.acquire(sc);
   15467 	if (rv != 0) {
   15468 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15469 		    __func__);
   15470 		return rv;
   15471 	}
   15472 
   15473 	/*
   15474 	 * Enable access to PHY wakeup registers.
   15475 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15476 	 */
   15477 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15478 	if (rv != 0) {
   15479 		device_printf(dev,
   15480 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15481 		goto release;
   15482 	}
   15483 
   15484 	/* Copy MAC MTA to PHY MTA */
   15485 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15486 		uint16_t lo, hi;
   15487 
   15488 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15489 		lo = (uint16_t)(mreg & 0xffff);
   15490 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15491 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15492 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15493 	}
   15494 
   15495 	/* Configure PHY Rx Control register */
   15496 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15497 	mreg = CSR_READ(sc, WMREG_RCTL);
   15498 	if (mreg & RCTL_UPE)
   15499 		preg |= BM_RCTL_UPE;
   15500 	if (mreg & RCTL_MPE)
   15501 		preg |= BM_RCTL_MPE;
   15502 	preg &= ~(BM_RCTL_MO_MASK);
   15503 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15504 	if (moff != 0)
   15505 		preg |= moff << BM_RCTL_MO_SHIFT;
   15506 	if (mreg & RCTL_BAM)
   15507 		preg |= BM_RCTL_BAM;
   15508 	if (mreg & RCTL_PMCF)
   15509 		preg |= BM_RCTL_PMCF;
   15510 	mreg = CSR_READ(sc, WMREG_CTRL);
   15511 	if (mreg & CTRL_RFCE)
   15512 		preg |= BM_RCTL_RFCE;
   15513 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15514 
   15515 	wuc = WUC_APME | WUC_PME_EN;
   15516 	wufc = WUFC_MAG;
   15517 	/* Enable PHY wakeup in MAC register */
   15518 	CSR_WRITE(sc, WMREG_WUC,
   15519 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15520 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15521 
   15522 	/* Configure and enable PHY wakeup in PHY registers */
   15523 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15524 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15525 
   15526 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15527 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15528 
   15529 release:
   15530 	sc->phy.release(sc);
   15531 
    15532 	return rv;
   15533 }
   15534 
   15535 /* Power down workaround on D3 */
   15536 static void
   15537 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15538 {
   15539 	uint32_t reg;
   15540 	uint16_t phyreg;
   15541 	int i;
   15542 
   15543 	for (i = 0; i < 2; i++) {
   15544 		/* Disable link */
   15545 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15546 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15547 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15548 
   15549 		/*
   15550 		 * Call gig speed drop workaround on Gig disable before
   15551 		 * accessing any PHY registers
   15552 		 */
   15553 		if (sc->sc_type == WM_T_ICH8)
   15554 			wm_gig_downshift_workaround_ich8lan(sc);
   15555 
   15556 		/* Write VR power-down enable */
   15557 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15558 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15559 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15560 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15561 
   15562 		/* Read it back and test */
   15563 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15564 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15565 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15566 			break;
   15567 
   15568 		/* Issue PHY reset and repeat at most one more time */
   15569 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15570 	}
   15571 }
   15572 
   15573 /*
   15574  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15575  *  @sc: pointer to the HW structure
   15576  *
   15577  *  During S0 to Sx transition, it is possible the link remains at gig
   15578  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15579  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15580  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15581  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15582  *  needs to be written.
   15583  *  Parts that support (and are linked to a partner which support) EEE in
   15584  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15585  *  than 10Mbps w/o EEE.
   15586  */
   15587 static void
   15588 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15589 {
   15590 	device_t dev = sc->sc_dev;
   15591 	struct ethercom *ec = &sc->sc_ethercom;
   15592 	uint32_t phy_ctrl;
   15593 	int rv;
   15594 
   15595 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15596 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15597 
   15598 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15599 
   15600 	if (sc->sc_phytype == WMPHY_I217) {
   15601 		uint16_t devid = sc->sc_pcidevid;
   15602 
   15603 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15604 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15605 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15606 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15607 		    (sc->sc_type >= WM_T_PCH_SPT))
   15608 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15609 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15610 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15611 
   15612 		if (sc->phy.acquire(sc) != 0)
   15613 			goto out;
   15614 
   15615 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15616 			uint16_t eee_advert;
   15617 
   15618 			rv = wm_read_emi_reg_locked(dev,
   15619 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15620 			if (rv)
   15621 				goto release;
   15622 
   15623 			/*
   15624 			 * Disable LPLU if both link partners support 100BaseT
   15625 			 * EEE and 100Full is advertised on both ends of the
   15626 			 * link, and enable Auto Enable LPI since there will
   15627 			 * be no driver to enable LPI while in Sx.
   15628 			 */
   15629 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15630 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15631 				uint16_t anar, phy_reg;
   15632 
   15633 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15634 				    &anar);
   15635 				if (anar & ANAR_TX_FD) {
   15636 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15637 					    PHY_CTRL_NOND0A_LPLU);
   15638 
   15639 					/* Set Auto Enable LPI after link up */
   15640 					sc->phy.readreg_locked(dev, 2,
   15641 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15642 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15643 					sc->phy.writereg_locked(dev, 2,
   15644 					    I217_LPI_GPIO_CTRL, phy_reg);
   15645 				}
   15646 			}
   15647 		}
   15648 
   15649 		/*
   15650 		 * For i217 Intel Rapid Start Technology support,
   15651 		 * when the system is going into Sx and no manageability engine
   15652 		 * is present, the driver must configure proxy to reset only on
   15653 		 * power good.	LPI (Low Power Idle) state must also reset only
   15654 		 * on power good, as well as the MTA (Multicast table array).
   15655 		 * The SMBus release must also be disabled on LCD reset.
   15656 		 */
   15657 
   15658 		/*
   15659 		 * Enable MTA to reset for Intel Rapid Start Technology
   15660 		 * Support
   15661 		 */
   15662 
   15663 release:
   15664 		sc->phy.release(sc);
   15665 	}
   15666 out:
   15667 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15668 
   15669 	if (sc->sc_type == WM_T_ICH8)
   15670 		wm_gig_downshift_workaround_ich8lan(sc);
   15671 
   15672 	if (sc->sc_type >= WM_T_PCH) {
   15673 		wm_oem_bits_config_ich8lan(sc, false);
   15674 
   15675 		/* Reset PHY to activate OEM bits on 82577/8 */
   15676 		if (sc->sc_type == WM_T_PCH)
   15677 			wm_reset_phy(sc);
   15678 
   15679 		if (sc->phy.acquire(sc) != 0)
   15680 			return;
   15681 		wm_write_smbus_addr(sc);
   15682 		sc->phy.release(sc);
   15683 	}
   15684 }
   15685 
   15686 /*
   15687  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15688  *  @sc: pointer to the HW structure
   15689  *
   15690  *  During Sx to S0 transitions on non-managed devices or managed devices
   15691  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15692  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15693  *  the PHY.
   15694  *  On i217, setup Intel Rapid Start Technology.
   15695  */
   15696 static int
   15697 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15698 {
   15699 	device_t dev = sc->sc_dev;
   15700 	int rv;
   15701 
   15702 	if (sc->sc_type < WM_T_PCH2)
   15703 		return 0;
   15704 
   15705 	rv = wm_init_phy_workarounds_pchlan(sc);
   15706 	if (rv != 0)
   15707 		return -1;
   15708 
   15709 	/* For i217 Intel Rapid Start Technology support when the system
   15710 	 * is transitioning from Sx and no manageability engine is present
   15711 	 * configure SMBus to restore on reset, disable proxy, and enable
   15712 	 * the reset on MTA (Multicast table array).
   15713 	 */
   15714 	if (sc->sc_phytype == WMPHY_I217) {
   15715 		uint16_t phy_reg;
   15716 
   15717 		if (sc->phy.acquire(sc) != 0)
   15718 			return -1;
   15719 
   15720 		/* Clear Auto Enable LPI after link up */
   15721 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15722 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15723 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15724 
   15725 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15726 			/* Restore clear on SMB if no manageability engine
   15727 			 * is present
   15728 			 */
   15729 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15730 			    &phy_reg);
   15731 			if (rv != 0)
   15732 				goto release;
   15733 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15734 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15735 
   15736 			/* Disable Proxy */
   15737 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15738 		}
   15739 		/* Enable reset on MTA */
    15740 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15741 		if (rv != 0)
   15742 			goto release;
   15743 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15744 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15745 
   15746 release:
   15747 		sc->phy.release(sc);
   15748 		return rv;
   15749 	}
   15750 
   15751 	return 0;
   15752 }
   15753 
   15754 static void
   15755 wm_enable_wakeup(struct wm_softc *sc)
   15756 {
   15757 	uint32_t reg, pmreg;
   15758 	pcireg_t pmode;
   15759 	int rv = 0;
   15760 
   15761 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15762 		device_xname(sc->sc_dev), __func__));
   15763 
   15764 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15765 	    &pmreg, NULL) == 0)
   15766 		return;
   15767 
   15768 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15769 		goto pme;
   15770 
   15771 	/* Advertise the wakeup capability */
   15772 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15773 	    | CTRL_SWDPIN(3));
   15774 
   15775 	/* Keep the laser running on fiber adapters */
   15776 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15777 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15778 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15779 		reg |= CTRL_EXT_SWDPIN(3);
   15780 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15781 	}
   15782 
   15783 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15784 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15785 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15786 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15787 		wm_suspend_workarounds_ich8lan(sc);
   15788 
   15789 #if 0	/* For the multicast packet */
   15790 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15791 	reg |= WUFC_MC;
   15792 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15793 #endif
   15794 
   15795 	if (sc->sc_type >= WM_T_PCH) {
   15796 		rv = wm_enable_phy_wakeup(sc);
   15797 		if (rv != 0)
   15798 			goto pme;
   15799 	} else {
   15800 		/* Enable wakeup by the MAC */
   15801 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15802 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15803 	}
   15804 
   15805 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15806 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15807 		|| (sc->sc_type == WM_T_PCH2))
   15808 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15809 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15810 
   15811 pme:
   15812 	/* Request PME */
   15813 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15814 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
   15815 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15816 		/* For WOL */
   15817 		pmode |= PCI_PMCSR_PME_EN;
   15818 	} else {
   15819 		/* Disable WOL */
   15820 		pmode &= ~PCI_PMCSR_PME_EN;
   15821 	}
   15822 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15823 }
   15824 
    15825 /* Disable ASPM L0s and/or L1 to work around chip errata */
   15826 static void
   15827 wm_disable_aspm(struct wm_softc *sc)
   15828 {
   15829 	pcireg_t reg, mask = 0;
    15830 	const char *str = "";
   15831 
   15832 	/*
    15833 	 * Only for PCIe devices which have the PCIe capability in their
    15834 	 * PCI config space.
   15835 	 */
   15836 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15837 		return;
   15838 
   15839 	switch (sc->sc_type) {
   15840 	case WM_T_82571:
   15841 	case WM_T_82572:
   15842 		/*
   15843 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15844 		 * State Power management L1 State (ASPM L1).
   15845 		 */
   15846 		mask = PCIE_LCSR_ASPM_L1;
   15847 		str = "L1 is";
   15848 		break;
   15849 	case WM_T_82573:
   15850 	case WM_T_82574:
   15851 	case WM_T_82583:
   15852 		/*
   15853 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15854 		 *
    15855 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    15856 		 * some chipsets.  Their documents say that disabling L0s
    15857 		 * with those specific chipsets is sufficient, but we follow
    15858 		 * what the Intel em driver does.
   15859 		 *
   15860 		 * References:
   15861 		 * Errata 8 of the Specification Update of i82573.
   15862 		 * Errata 20 of the Specification Update of i82574.
   15863 		 * Errata 9 of the Specification Update of i82583.
   15864 		 */
   15865 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15866 		str = "L0s and L1 are";
   15867 		break;
   15868 	default:
   15869 		return;
   15870 	}
   15871 
   15872 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15873 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15874 	reg &= ~mask;
   15875 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15876 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15877 
   15878 	/* Print only in wm_attach() */
   15879 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15880 		aprint_verbose_dev(sc->sc_dev,
   15881 		    "ASPM %s disabled to workaround the errata.\n", str);
   15882 }
   15883 
   15884 /* LPLU */
   15885 
   15886 static void
   15887 wm_lplu_d0_disable(struct wm_softc *sc)
   15888 {
   15889 	struct mii_data *mii = &sc->sc_mii;
   15890 	uint32_t reg;
   15891 	uint16_t phyval;
   15892 
   15893 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   15894 		device_xname(sc->sc_dev), __func__));
   15895 
   15896 	if (sc->sc_phytype == WMPHY_IFE)
   15897 		return;
   15898 
   15899 	switch (sc->sc_type) {
   15900 	case WM_T_82571:
   15901 	case WM_T_82572:
   15902 	case WM_T_82573:
   15903 	case WM_T_82575:
   15904 	case WM_T_82576:
   15905 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
   15906 		phyval &= ~PMR_D0_LPLU;
   15907 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
   15908 		break;
   15909 	case WM_T_82580:
   15910 	case WM_T_I350:
   15911 	case WM_T_I210:
   15912 	case WM_T_I211:
   15913 		reg = CSR_READ(sc, WMREG_PHPM);
   15914 		reg &= ~PHPM_D0A_LPLU;
   15915 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15916 		break;
   15917 	case WM_T_82574:
   15918 	case WM_T_82583:
   15919 	case WM_T_ICH8:
   15920 	case WM_T_ICH9:
   15921 	case WM_T_ICH10:
   15922 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15923 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15924 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15925 		CSR_WRITE_FLUSH(sc);
   15926 		break;
   15927 	case WM_T_PCH:
   15928 	case WM_T_PCH2:
   15929 	case WM_T_PCH_LPT:
   15930 	case WM_T_PCH_SPT:
   15931 	case WM_T_PCH_CNP:
   15932 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15933 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15934 		if (wm_phy_resetisblocked(sc) == false)
   15935 			phyval |= HV_OEM_BITS_ANEGNOW;
   15936 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15937 		break;
   15938 	default:
   15939 		break;
   15940 	}
   15941 }
   15942 
   15943 /* EEE */
   15944 
   15945 static int
   15946 wm_set_eee_i350(struct wm_softc *sc)
   15947 {
   15948 	struct ethercom *ec = &sc->sc_ethercom;
   15949 	uint32_t ipcnfg, eeer;
   15950 	uint32_t ipcnfg_mask
   15951 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15952 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15953 
   15954 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15955 
   15956 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15957 	eeer = CSR_READ(sc, WMREG_EEER);
   15958 
   15959 	/* Enable or disable per user setting */
   15960 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15961 		ipcnfg |= ipcnfg_mask;
   15962 		eeer |= eeer_mask;
   15963 	} else {
   15964 		ipcnfg &= ~ipcnfg_mask;
   15965 		eeer &= ~eeer_mask;
   15966 	}
   15967 
   15968 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15969 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15970 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15971 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15972 
   15973 	return 0;
   15974 }
   15975 
   15976 static int
   15977 wm_set_eee_pchlan(struct wm_softc *sc)
   15978 {
   15979 	device_t dev = sc->sc_dev;
   15980 	struct ethercom *ec = &sc->sc_ethercom;
   15981 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15982 	int rv = 0;
   15983 
   15984 	switch (sc->sc_phytype) {
   15985 	case WMPHY_82579:
   15986 		lpa = I82579_EEE_LP_ABILITY;
   15987 		pcs_status = I82579_EEE_PCS_STATUS;
   15988 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15989 		break;
   15990 	case WMPHY_I217:
   15991 		lpa = I217_EEE_LP_ABILITY;
   15992 		pcs_status = I217_EEE_PCS_STATUS;
   15993 		adv_addr = I217_EEE_ADVERTISEMENT;
   15994 		break;
   15995 	default:
   15996 		return 0;
   15997 	}
   15998 
   15999 	if (sc->phy.acquire(sc)) {
   16000 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   16001 		return 0;
   16002 	}
   16003 
   16004 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   16005 	if (rv != 0)
   16006 		goto release;
   16007 
   16008 	/* Clear bits that enable EEE in various speeds */
   16009 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   16010 
   16011 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   16012 		/* Save off link partner's EEE ability */
   16013 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   16014 		if (rv != 0)
   16015 			goto release;
   16016 
   16017 		/* Read EEE advertisement */
   16018 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   16019 			goto release;
   16020 
   16021 		/*
   16022 		 * Enable EEE only for speeds in which the link partner is
   16023 		 * EEE capable and for which we advertise EEE.
   16024 		 */
   16025 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   16026 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   16027 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   16028 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   16029 			if ((data & ANLPAR_TX_FD) != 0)
   16030 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   16031 			else {
   16032 				/*
   16033 				 * EEE is not supported in 100Half, so ignore
   16034 				 * partner's EEE in 100 ability if full-duplex
   16035 				 * is not advertised.
   16036 				 */
   16037 				sc->eee_lp_ability
   16038 				    &= ~AN_EEEADVERT_100_TX;
   16039 			}
   16040 		}
   16041 	}
   16042 
   16043 	if (sc->sc_phytype == WMPHY_82579) {
   16044 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   16045 		if (rv != 0)
   16046 			goto release;
   16047 
   16048 		data &= ~I82579_LPI_PLL_SHUT_100;
   16049 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   16050 	}
   16051 
   16052 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   16053 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   16054 		goto release;
   16055 
   16056 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   16057 release:
   16058 	sc->phy.release(sc);
   16059 
   16060 	return rv;
   16061 }
   16062 
   16063 static int
   16064 wm_set_eee(struct wm_softc *sc)
   16065 {
   16066 	struct ethercom *ec = &sc->sc_ethercom;
   16067 
   16068 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   16069 		return 0;
   16070 
   16071 	if (sc->sc_type == WM_T_I354) {
   16072 		/* I354 uses an external PHY */
   16073 		return 0; /* not yet */
   16074 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   16075 		return wm_set_eee_i350(sc);
   16076 	else if (sc->sc_type >= WM_T_PCH2)
   16077 		return wm_set_eee_pchlan(sc);
   16078 
   16079 	return 0;
   16080 }
   16081 
   16082 /*
   16083  * Workarounds (mainly PHY related).
    16084  * Basically, the PHY workarounds are in the PHY drivers.
   16085  */
   16086 
   16087 /* Workaround for 82566 Kumeran PCS lock loss */
   16088 static int
   16089 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   16090 {
   16091 	struct mii_data *mii = &sc->sc_mii;
   16092 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16093 	int i, reg, rv;
   16094 	uint16_t phyreg;
   16095 
   16096 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16097 		device_xname(sc->sc_dev), __func__));
   16098 
   16099 	/* If the link is not up, do nothing */
   16100 	if ((status & STATUS_LU) == 0)
   16101 		return 0;
   16102 
   16103 	/* Nothing to do if the link is other than 1Gbps */
   16104 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   16105 		return 0;
   16106 
   16107 	for (i = 0; i < 10; i++) {
   16108 		/* read twice */
   16109 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16110 		if (rv != 0)
   16111 			return rv;
   16112 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   16113 		if (rv != 0)
   16114 			return rv;
   16115 
   16116 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   16117 			goto out;	/* GOOD! */
   16118 
   16119 		/* Reset the PHY */
   16120 		wm_reset_phy(sc);
   16121 		delay(5*1000);
   16122 	}
   16123 
   16124 	/* Disable GigE link negotiation */
   16125 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   16126 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   16127 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   16128 
   16129 	/*
   16130 	 * Call gig speed drop workaround on Gig disable before accessing
   16131 	 * any PHY registers.
   16132 	 */
   16133 	wm_gig_downshift_workaround_ich8lan(sc);
   16134 
   16135 out:
   16136 	return 0;
   16137 }
   16138 
   16139 /*
   16140  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   16141  *  @sc: pointer to the HW structure
   16142  *
    16143  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   16144  *  LPLU, Gig disable, MDIC PHY reset):
   16145  *    1) Set Kumeran Near-end loopback
   16146  *    2) Clear Kumeran Near-end loopback
   16147  *  Should only be called for ICH8[m] devices with any 1G Phy.
   16148  */
   16149 static void
   16150 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   16151 {
   16152 	uint16_t kmreg;
   16153 
   16154 	/* Only for igp3 */
   16155 	if (sc->sc_phytype == WMPHY_IGP_3) {
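		/*
		 * Pulse Kumeran near-end loopback: set the bit, then clear
		 * it.  This set/clear sequence appears to be all that is
		 * needed to resynchronize the Kumeran interface after the
		 * gig speed drop.
		 */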
   16156 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   16157 			return;
   16158 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   16159 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   16160 			return;
   16161 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   16162 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   16163 	}
   16164 }
   16165 
   16166 /*
   16167  * Workaround for pch's PHYs
   16168  * XXX should be moved to new PHY driver?
   16169  */
   16170 static int
   16171 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16172 {
   16173 	device_t dev = sc->sc_dev;
   16174 	struct mii_data *mii = &sc->sc_mii;
   16175 	struct mii_softc *child;
   16176 	uint16_t phy_data, phyrev = 0;
   16177 	int phytype = sc->sc_phytype;
   16178 	int rv;
   16179 
   16180 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16181 		device_xname(dev), __func__));
   16182 	KASSERT(sc->sc_type == WM_T_PCH);
   16183 
   16184 	/* Set MDIO slow mode before any other MDIO access */
   16185 	if (phytype == WMPHY_82577)
   16186 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   16187 			return rv;
   16188 
   16189 	child = LIST_FIRST(&mii->mii_phys);
   16190 	if (child != NULL)
   16191 		phyrev = child->mii_mpd_rev;
   16192 
	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   16194 	if ((child != NULL) &&
   16195 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   16196 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   16197 		/* Disable generation of early preamble (0x4431) */
   16198 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16199 		    &phy_data);
   16200 		if (rv != 0)
   16201 			return rv;
   16202 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   16203 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   16204 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   16205 		    phy_data);
   16206 		if (rv != 0)
   16207 			return rv;
   16208 
   16209 		/* Preamble tuning for SSC */
   16210 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   16211 		if (rv != 0)
   16212 			return rv;
   16213 	}
   16214 
   16215 	/* 82578 */
   16216 	if (phytype == WMPHY_82578) {
   16217 		/*
   16218 		 * Return registers to default by doing a soft reset then
   16219 		 * writing 0x3140 to the control register
   16220 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   16221 		 */
   16222 		if ((child != NULL) && (phyrev < 2)) {
   16223 			PHY_RESET(child);
   16224 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   16225 			if (rv != 0)
   16226 				return rv;
   16227 		}
   16228 	}
   16229 
   16230 	/* Select page 0 */
   16231 	if ((rv = sc->phy.acquire(sc)) != 0)
   16232 		return rv;
   16233 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
   16234 	sc->phy.release(sc);
   16235 	if (rv != 0)
   16236 		return rv;
   16237 
   16238 	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there is
	 * link, so that it disables K1 if the link is at 1Gbps.
   16241 	 */
   16242 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   16243 		return rv;
   16244 
   16245 	/* Workaround for link disconnects on a busy hub in half duplex */
   16246 	rv = sc->phy.acquire(sc);
   16247 	if (rv)
   16248 		return rv;
   16249 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   16250 	if (rv)
   16251 		goto release;
   16252 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   16253 	    phy_data & 0x00ff);
   16254 	if (rv)
   16255 		goto release;
   16256 
	/* Raise the MSE threshold so the link stays up when noise is high */
   16258 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   16259 release:
   16260 	sc->phy.release(sc);
   16261 
   16262 	return rv;
   16263 }
   16264 
   16265 /*
   16266  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   16267  *  @sc:   pointer to the HW structure
   16268  */
   16269 static void
   16270 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   16271 {
   16272 
   16273 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16274 		device_xname(sc->sc_dev), __func__));
   16275 
   16276 	if (sc->phy.acquire(sc) != 0)
   16277 		return;
   16278 
   16279 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16280 
   16281 	sc->phy.release(sc);
   16282 }
   16283 
   16284 static void
   16285 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
   16286 {
   16287 	device_t dev = sc->sc_dev;
   16288 	uint32_t mac_reg;
   16289 	uint16_t i, wuce;
   16290 	int count;
   16291 
   16292 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16293 		device_xname(dev), __func__));
   16294 
   16295 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   16296 		return;
   16297 
   16298 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   16299 	count = wm_rar_count(sc);
   16300 	for (i = 0; i < count; i++) {
   16301 		uint16_t lo, hi;
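		/*
		 * RAL holds the low 32 bits of the station address and RAH
		 * the high 16 bits plus the Address Valid flag; each 32-bit
		 * MAC register is split into two 16-bit BM PHY wakeup
		 * registers below.
		 */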
   16302 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16303 		lo = (uint16_t)(mac_reg & 0xffff);
   16304 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   16305 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   16306 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   16307 
   16308 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16309 		lo = (uint16_t)(mac_reg & 0xffff);
   16310 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   16311 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   16312 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   16313 	}
   16314 
   16315 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   16316 }
   16317 
   16318 /*
   16319  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   16320  *  with 82579 PHY
   16321  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
   16322  */
   16323 static int
   16324 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
   16325 {
   16326 	device_t dev = sc->sc_dev;
   16327 	int rar_count;
   16328 	int rv;
   16329 	uint32_t mac_reg;
   16330 	uint16_t dft_ctrl, data;
   16331 	uint16_t i;
   16332 
   16333 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16334 		device_xname(dev), __func__));
   16335 
   16336 	if (sc->sc_type < WM_T_PCH2)
   16337 		return 0;
   16338 
   16339 	/* Acquire PHY semaphore */
   16340 	rv = sc->phy.acquire(sc);
   16341 	if (rv != 0)
   16342 		return rv;
   16343 
   16344 	/* Disable Rx path while enabling/disabling workaround */
   16345 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
   16346 	if (rv != 0)
   16347 		goto out;
   16348 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16349 	    dft_ctrl | (1 << 14));
   16350 	if (rv != 0)
   16351 		goto out;
   16352 
   16353 	if (enable) {
		/*
		 * Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC.
		 */
   16357 		rar_count = wm_rar_count(sc);
   16358 		for (i = 0; i < rar_count; i++) {
   16359 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
   16360 			uint32_t addr_high, addr_low;
   16361 
   16362 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   16363 			if (!(addr_high & RAL_AV))
   16364 				continue;
   16365 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   16366 			mac_addr[0] = (addr_low & 0xFF);
   16367 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
   16368 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
   16369 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
   16370 			mac_addr[4] = (addr_high & 0xFF);
   16371 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
   16372 
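			/*
			 * Seed the Rx address initial CRC register with the
			 * complemented little-endian CRC-32 of the address;
			 * the hardware presumably needs this to keep
			 * validating frames once CRC stripping is enabled
			 * below.
			 */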
   16373 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
   16374 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
   16375 		}
   16376 
   16377 		/* Write Rx addresses to the PHY */
   16378 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
   16379 	}
   16380 
   16381 	/*
   16382 	 * If enable ==
   16383 	 *	true: Enable jumbo frame workaround in the MAC.
   16384 	 *	false: Write MAC register values back to h/w defaults.
   16385 	 */
   16386 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
   16387 	if (enable) {
   16388 		mac_reg &= ~(1 << 14);
   16389 		mac_reg |= (7 << 15);
   16390 	} else
   16391 		mac_reg &= ~(0xf << 14);
   16392 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
   16393 
   16394 	mac_reg = CSR_READ(sc, WMREG_RCTL);
   16395 	if (enable) {
   16396 		mac_reg |= RCTL_SECRC;
   16397 		sc->sc_rctl |= RCTL_SECRC;
   16398 		sc->sc_flags |= WM_F_CRC_STRIP;
   16399 	} else {
   16400 		mac_reg &= ~RCTL_SECRC;
   16401 		sc->sc_rctl &= ~RCTL_SECRC;
   16402 		sc->sc_flags &= ~WM_F_CRC_STRIP;
   16403 	}
   16404 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
   16405 
   16406 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
   16407 	if (rv != 0)
   16408 		goto out;
   16409 	if (enable)
   16410 		data |= 1 << 0;
   16411 	else
   16412 		data &= ~(1 << 0);
   16413 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
   16414 	if (rv != 0)
   16415 		goto out;
   16416 
   16417 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
   16418 	if (rv != 0)
   16419 		goto out;
   16420 	/*
	 * XXX FreeBSD and Linux both write the same value in the enable case
	 * and in the disable case. Is that correct?
   16423 	 */
   16424 	data &= ~(0xf << 8);
   16425 	data |= (0xb << 8);
   16426 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
   16427 	if (rv != 0)
   16428 		goto out;
   16429 
   16430 	/*
   16431 	 * If enable ==
   16432 	 *	true: Enable jumbo frame workaround in the PHY.
   16433 	 *	false: Write PHY register values back to h/w defaults.
   16434 	 */
   16435 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
   16436 	if (rv != 0)
   16437 		goto out;
   16438 	data &= ~(0x7F << 5);
   16439 	if (enable)
   16440 		data |= (0x37 << 5);
   16441 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
   16442 	if (rv != 0)
   16443 		goto out;
   16444 
   16445 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
   16446 	if (rv != 0)
   16447 		goto out;
   16448 	if (enable)
   16449 		data &= ~(1 << 13);
   16450 	else
   16451 		data |= (1 << 13);
   16452 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
   16453 	if (rv != 0)
   16454 		goto out;
   16455 
   16456 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
   16457 	if (rv != 0)
   16458 		goto out;
   16459 	data &= ~(0x3FF << 2);
   16460 	if (enable)
   16461 		data |= (I82579_TX_PTR_GAP << 2);
   16462 	else
   16463 		data |= (0x8 << 2);
   16464 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
   16465 	if (rv != 0)
   16466 		goto out;
   16467 
   16468 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
   16469 	    enable ? 0xf100 : 0x7e00);
   16470 	if (rv != 0)
   16471 		goto out;
   16472 
   16473 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
   16474 	if (rv != 0)
   16475 		goto out;
   16476 	if (enable)
   16477 		data |= 1 << 10;
   16478 	else
   16479 		data &= ~(1 << 10);
   16480 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
   16481 	if (rv != 0)
   16482 		goto out;
   16483 
   16484 	/* Re-enable Rx path after enabling/disabling workaround */
   16485 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
   16486 	    dft_ctrl & ~(1 << 14));
   16487 
   16488 out:
   16489 	sc->phy.release(sc);
   16490 
   16491 	return rv;
   16492 }
   16493 
   16494 /*
   16495  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   16496  *  done after every PHY reset.
   16497  */
   16498 static int
   16499 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   16500 {
   16501 	device_t dev = sc->sc_dev;
   16502 	int rv;
   16503 
   16504 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16505 		device_xname(dev), __func__));
   16506 	KASSERT(sc->sc_type == WM_T_PCH2);
   16507 
   16508 	/* Set MDIO slow mode before any other MDIO access */
   16509 	rv = wm_set_mdio_slow_mode_hv(sc);
   16510 	if (rv != 0)
   16511 		return rv;
   16512 
   16513 	rv = sc->phy.acquire(sc);
   16514 	if (rv != 0)
   16515 		return rv;
	/* Raise the MSE threshold so the link stays up when noise is high */
   16517 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   16518 	if (rv != 0)
   16519 		goto release;
	/* Drop the link after the MSE threshold has been reached 5 times */
   16521 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   16522 release:
   16523 	sc->phy.release(sc);
   16524 
   16525 	return rv;
   16526 }
   16527 
   16528 /**
   16529  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   16530  *  @link: link up bool flag
   16531  *
   16532  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Work around the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
   16535  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   16536  *  speeds in order to avoid Tx hangs.
   16537  **/
   16538 static int
   16539 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   16540 {
   16541 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   16542 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   16543 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   16544 	uint16_t phyreg;
   16545 
   16546 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
   16550 		if (rv != 0)
   16551 			goto release;
   16552 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16553 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   16554 		if (rv != 0)
   16555 			goto release;
   16556 		delay(20);
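		/*
		 * Keep the PLL clock request asserted while at 1Gbps so K1
		 * cannot de-assert it and stall pending DMA writes (see the
		 * function comment above).
		 */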
   16557 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   16558 
   16559 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   16560 		    &phyreg);
   16561 release:
   16562 		sc->phy.release(sc);
   16563 		return rv;
   16564 	}
   16565 
   16566 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   16567 
   16568 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   16569 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   16570 	    || !link
   16571 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   16572 		goto update_fextnvm6;
   16573 
   16574 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   16575 
   16576 	/* Clear link status transmit timeout */
   16577 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   16578 	if (speed == STATUS_SPEED_100) {
   16579 		/* Set inband Tx timeout to 5x10us for 100Half */
   16580 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16581 
   16582 		/* Do not extend the K1 entry latency for 100Half */
   16583 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16584 	} else {
   16585 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   16586 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   16587 
   16588 		/* Extend the K1 entry latency for 10 Mbps */
   16589 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   16590 	}
   16591 
   16592 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   16593 
   16594 update_fextnvm6:
   16595 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   16596 	return 0;
   16597 }
   16598 
   16599 /*
   16600  *  wm_k1_gig_workaround_hv - K1 Si workaround
   16601  *  @sc:   pointer to the HW structure
   16602  *  @link: link up bool flag
   16603  *
   16604  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever the link is at
 *  1Gbps.
   16606  *  If link is down, the function will restore the default K1 setting located
   16607  *  in the NVM.
   16608  */
   16609 static int
   16610 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   16611 {
   16612 	int k1_enable = sc->sc_nvm_k1_enabled;
   16613 
   16614 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16615 		device_xname(sc->sc_dev), __func__));
   16616 
   16617 	if (sc->phy.acquire(sc) != 0)
   16618 		return -1;
   16619 
   16620 	if (link) {
   16621 		k1_enable = 0;
   16622 
   16623 		/* Link stall fix for link up */
   16624 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16625 		    0x0100);
   16626 	} else {
   16627 		/* Link stall fix for link down */
   16628 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   16629 		    0x4100);
   16630 	}
   16631 
   16632 	wm_configure_k1_ich8lan(sc, k1_enable);
   16633 	sc->phy.release(sc);
   16634 
   16635 	return 0;
   16636 }
   16637 
   16638 /*
   16639  *  wm_k1_workaround_lv - K1 Si workaround
   16640  *  @sc:   pointer to the HW structure
   16641  *
   16642  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   16643  *  Disable K1 for 1000 and 100 speeds
   16644  */
   16645 static int
   16646 wm_k1_workaround_lv(struct wm_softc *sc)
   16647 {
   16648 	uint32_t reg;
   16649 	uint16_t phyreg;
   16650 	int rv;
   16651 
   16652 	if (sc->sc_type != WM_T_PCH2)
   16653 		return 0;
   16654 
   16655 	/* Set K1 beacon duration based on 10Mbps speed */
   16656 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16657 	if (rv != 0)
   16658 		return rv;
   16659 
   16660 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16661 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16662 		if (phyreg &
   16663 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   16665 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16666 			    &phyreg);
   16667 			if (rv != 0)
   16668 				return rv;
   16669 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16670 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16671 			    phyreg);
   16672 			if (rv != 0)
   16673 				return rv;
   16674 		} else {
   16675 			/* For 10Mbps */
   16676 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16677 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16678 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16679 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16680 		}
   16681 	}
   16682 
   16683 	return 0;
   16684 }
   16685 
   16686 /*
   16687  *  wm_link_stall_workaround_hv - Si workaround
   16688  *  @sc: pointer to the HW structure
   16689  *
   16690  *  This function works around a Si bug where the link partner can get
   16691  *  a link up indication before the PHY does. If small packets are sent
   16692  *  by the link partner they can be placed in the packet buffer without
 *  being properly accounted for by the PHY and will stall, preventing
   16694  *  further packets from being received.  The workaround is to clear the
   16695  *  packet buffer after the PHY detects link up.
   16696  */
   16697 static int
   16698 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16699 {
   16700 	uint16_t phyreg;
   16701 
   16702 	if (sc->sc_phytype != WMPHY_82578)
   16703 		return 0;
   16704 
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   16706 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16707 	if ((phyreg & BMCR_LOOP) != 0)
   16708 		return 0;
   16709 
   16710 	/* Check if link is up and at 1Gbps */
   16711 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16712 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16713 	    | BM_CS_STATUS_SPEED_MASK;
   16714 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16715 		| BM_CS_STATUS_SPEED_1000))
   16716 		return 0;
   16717 
   16718 	delay(200 * 1000);	/* XXX too big */
   16719 
   16720 	/* Flush the packets in the fifo buffer */
   16721 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16722 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16723 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16724 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16725 
   16726 	return 0;
   16727 }
   16728 
   16729 static int
   16730 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16731 {
   16732 	int rv;
   16733 	uint16_t reg;
   16734 
   16735 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16736 	if (rv != 0)
   16737 		return rv;
   16738 
   16739 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16740 	    reg | HV_KMRN_MDIO_SLOW);
   16741 }
   16742 
   16743 /*
   16744  *  wm_configure_k1_ich8lan - Configure K1 power state
   16745  *  @sc: pointer to the HW structure
   16746  *  @enable: K1 state to configure
   16747  *
   16748  *  Configure the K1 power state based on the provided parameter.
   16749  *  Assumes semaphore already acquired.
   16750  */
   16751 static void
   16752 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16753 {
   16754 	uint32_t ctrl, ctrl_ext, tmp;
   16755 	uint16_t kmreg;
   16756 	int rv;
   16757 
   16758 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16759 
   16760 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16761 	if (rv != 0)
   16762 		return;
   16763 
   16764 	if (k1_enable)
   16765 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16766 	else
   16767 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16768 
   16769 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16770 	if (rv != 0)
   16771 		return;
   16772 
   16773 	delay(20);
   16774 
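	/*
	 * Briefly force the MAC speed with speed-bypass enabled, then
	 * restore the original CTRL/CTRL_EXT values; this bounce appears
	 * to be what makes the new K1 setting take effect.
	 */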
   16775 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16776 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16777 
   16778 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16779 	tmp |= CTRL_FRCSPD;
   16780 
   16781 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16782 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16783 	CSR_WRITE_FLUSH(sc);
   16784 	delay(20);
   16785 
   16786 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16787 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16788 	CSR_WRITE_FLUSH(sc);
   16789 	delay(20);
   16790 
   16791 	return;
   16792 }
   16793 
/* Special case - the 82575 needs manual init ... */
   16795 static void
   16796 wm_reset_init_script_82575(struct wm_softc *sc)
   16797 {
   16798 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   16801 	 */
   16802 
   16803 	/* SerDes configuration via SERDESCTRL */
   16804 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16805 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16806 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16807 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16808 
   16809 	/* CCM configuration via CCMCTL register */
   16810 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16811 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16812 
   16813 	/* PCIe lanes configuration */
   16814 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16815 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16816 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16817 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16818 
   16819 	/* PCIe PLL Configuration */
   16820 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16821 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16822 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16823 }
   16824 
   16825 static void
   16826 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16827 {
   16828 	uint32_t reg;
   16829 	uint16_t nvmword;
   16830 	int rv;
   16831 
   16832 	if (sc->sc_type != WM_T_82580)
   16833 		return;
   16834 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16835 		return;
   16836 
   16837 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16838 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16839 	if (rv != 0) {
   16840 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16841 		    __func__);
   16842 		return;
   16843 	}
   16844 
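	/*
	 * Propagate the NVM's per-port MDIO routing bits (external MDIO
	 * destination and shared/common MDIO interface) into MDICNFG.
	 */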
   16845 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16846 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16847 		reg |= MDICNFG_DEST;
   16848 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16849 		reg |= MDICNFG_COM_MDIO;
   16850 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16851 }
   16852 
   16853 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16854 
   16855 static bool
   16856 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16857 {
   16858 	uint32_t reg;
   16859 	uint16_t id1, id2;
   16860 	int i, rv;
   16861 
   16862 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16863 		device_xname(sc->sc_dev), __func__));
   16864 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16865 
   16866 	id1 = id2 = 0xffff;
   16867 	for (i = 0; i < 2; i++) {
   16868 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16869 		    &id1);
   16870 		if ((rv != 0) || MII_INVALIDID(id1))
   16871 			continue;
   16872 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16873 		    &id2);
   16874 		if ((rv != 0) || MII_INVALIDID(id2))
   16875 			continue;
   16876 		break;
   16877 	}
   16878 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16879 		goto out;
   16880 
   16881 	/*
	 * In case the PHY needs to be in MDIO slow mode,
	 * set slow mode and try to read the PHY ID again.
   16884 	 */
   16885 	rv = 0;
   16886 	if (sc->sc_type < WM_T_PCH_LPT) {
   16887 		sc->phy.release(sc);
   16888 		wm_set_mdio_slow_mode_hv(sc);
   16889 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16890 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16891 		sc->phy.acquire(sc);
   16892 	}
   16893 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16894 		device_printf(sc->sc_dev, "XXX return with false\n");
   16895 		return false;
   16896 	}
   16897 out:
   16898 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16899 		/* Only unforce SMBus if ME is not active */
   16900 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16901 			uint16_t phyreg;
   16902 
   16903 			/* Unforce SMBus mode in PHY */
   16904 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16905 			    CV_SMB_CTRL, &phyreg);
   16906 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16907 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16908 			    CV_SMB_CTRL, phyreg);
   16909 
   16910 			/* Unforce SMBus mode in MAC */
   16911 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16912 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16913 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16914 		}
   16915 	}
   16916 	return true;
   16917 }
   16918 
   16919 static void
   16920 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16921 {
   16922 	uint32_t reg;
   16923 	int i;
   16924 
   16925 	/* Set PHY Config Counter to 50msec */
   16926 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16927 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16928 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16929 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16930 
   16931 	/* Toggle LANPHYPC */
   16932 	reg = CSR_READ(sc, WMREG_CTRL);
   16933 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16934 	reg &= ~CTRL_LANPHYPC_VALUE;
   16935 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16936 	CSR_WRITE_FLUSH(sc);
   16937 	delay(1000);
   16938 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16939 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16940 	CSR_WRITE_FLUSH(sc);
   16941 
   16942 	if (sc->sc_type < WM_T_PCH_LPT)
   16943 		delay(50 * 1000);
   16944 	else {
   16945 		i = 20;
   16946 
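		/*
		 * Wait up to 20 * 5ms for the MAC to report LAN PHY power
		 * cycle done (LPCD) before the final settle delay.
		 */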
   16947 		do {
   16948 			delay(5 * 1000);
   16949 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16950 		    && i--);
   16951 
   16952 		delay(30 * 1000);
   16953 	}
   16954 }
   16955 
   16956 static int
   16957 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16958 {
   16959 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16960 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16961 	uint32_t rxa;
   16962 	uint16_t scale = 0, lat_enc = 0;
   16963 	int32_t obff_hwm = 0;
   16964 	int64_t lat_ns, value;
   16965 
   16966 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   16967 		device_xname(sc->sc_dev), __func__));
   16968 
   16969 	if (link) {
   16970 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16971 		uint32_t status;
   16972 		uint16_t speed;
   16973 		pcireg_t preg;
   16974 
   16975 		status = CSR_READ(sc, WMREG_STATUS);
   16976 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16977 		case STATUS_SPEED_10:
   16978 			speed = 10;
   16979 			break;
   16980 		case STATUS_SPEED_100:
   16981 			speed = 100;
   16982 			break;
   16983 		case STATUS_SPEED_1000:
   16984 			speed = 1000;
   16985 			break;
   16986 		default:
   16987 			device_printf(sc->sc_dev, "Unknown speed "
   16988 			    "(status = %08x)\n", status);
   16989 			return -1;
   16990 		}
   16991 
   16992 		/* Rx Packet Buffer Allocation size (KB) */
   16993 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16994 
   16995 		/*
   16996 		 * Determine the maximum latency tolerated by the device.
   16997 		 *
   16998 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16999 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   17000 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   17001 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   17002 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   17003 		 */
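		/*
		 * Worked example (hypothetical numbers): with rxa = 26KB,
		 * MTU = 1500 and a 100Mb/s link, the budget is the time to
		 * drain the buffer minus two maximum frames at line rate:
		 * (26 * 1024 - 2 * (1500 + 14)) * 8 bits * 1000 / 100Mb/s
		 * = 1887680 ns, i.e. about 1.89 ms.
		 */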
   17004 		lat_ns = ((int64_t)rxa * 1024 -
   17005 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   17006 			+ ETHER_HDR_LEN))) * 8 * 1000;
   17007 		if (lat_ns < 0)
   17008 			lat_ns = 0;
   17009 		else
   17010 			lat_ns /= speed;
   17011 		value = lat_ns;
   17012 
   17013 		while (value > LTRV_VALUE) {
			scale++;
   17015 			value = howmany(value, __BIT(5));
   17016 		}
   17017 		if (scale > LTRV_SCALE_MAX) {
   17018 			device_printf(sc->sc_dev,
   17019 			    "Invalid LTR latency scale %d\n", scale);
   17020 			return -1;
   17021 		}
   17022 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   17023 
   17024 		/* Determine the maximum latency tolerated by the platform */
   17025 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17026 		    WM_PCI_LTR_CAP_LPT);
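		/*
		 * The capability word is assumed to hold the max snoop
		 * latency in its low 16 bits and the max no-snoop latency
		 * in its high 16 bits, both LTR-encoded like lat_enc.
		 */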
   17027 		max_snoop = preg & 0xffff;
   17028 		max_nosnoop = preg >> 16;
   17029 
   17030 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   17031 
   17032 		if (lat_enc > max_ltr_enc) {
   17033 			lat_enc = max_ltr_enc;
   17034 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   17035 			    * PCI_LTR_SCALETONS(
   17036 				    __SHIFTOUT(lat_enc,
   17037 					PCI_LTR_MAXSNOOPLAT_SCALE));
   17038 		}
   17039 
   17040 		if (lat_ns) {
   17041 			lat_ns *= speed * 1000;
   17042 			lat_ns /= 8;
   17043 			lat_ns /= 1000000000;
   17044 			obff_hwm = (int32_t)(rxa - lat_ns);
   17045 		}
   17046 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
   17049 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   17050 			return -1;
   17051 		}
   17052 	}
   17053 	/* Snoop and No-Snoop latencies the same */
   17054 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   17055 	CSR_WRITE(sc, WMREG_LTRV, reg);
   17056 
   17057 	/* Set OBFF high water mark */
   17058 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   17059 	reg |= obff_hwm;
   17060 	CSR_WRITE(sc, WMREG_SVT, reg);
   17061 
   17062 	/* Enable OBFF */
   17063 	reg = CSR_READ(sc, WMREG_SVCR);
   17064 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   17065 	CSR_WRITE(sc, WMREG_SVCR, reg);
   17066 
   17067 	return 0;
   17068 }
   17069 
   17070 /*
   17071  * I210 Errata 25 and I211 Errata 10
   17072  * Slow System Clock.
   17073  *
 * Note that on NetBSD this function is called in both the FLASH and iNVM
 * cases.
   17075  */
   17076 static int
   17077 wm_pll_workaround_i210(struct wm_softc *sc)
   17078 {
   17079 	uint32_t mdicnfg, wuc;
   17080 	uint32_t reg;
   17081 	pcireg_t pcireg;
   17082 	uint32_t pmreg;
   17083 	uint16_t nvmword, tmp_nvmword;
   17084 	uint16_t phyval;
   17085 	bool wa_done = false;
   17086 	int i, rv = 0;
   17087 
   17088 	/* Get Power Management cap offset */
   17089 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   17090 	    &pmreg, NULL) == 0)
   17091 		return -1;
   17092 
   17093 	/* Save WUC and MDICNFG registers */
   17094 	wuc = CSR_READ(sc, WMREG_WUC);
   17095 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   17096 
   17097 	reg = mdicnfg & ~MDICNFG_DEST;
   17098 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   17099 
   17100 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
   17101 		/*
   17102 		 * The default value of the Initialization Control Word 1
   17103 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
   17104 		 */
   17105 		nvmword = INVM_DEFAULT_AL;
   17106 	}
   17107 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   17108 
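	/*
	 * Retry loop: if the PHY PLL reports unconfigured, reset the
	 * internal PHY, load the autoload word with the PLL workaround
	 * value, bounce the device through D3 and check again.
	 */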
   17109 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   17110 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   17111 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   17112 
   17113 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   17114 			rv = 0;
   17115 			break; /* OK */
   17116 		} else
   17117 			rv = -1;
   17118 
   17119 		wa_done = true;
   17120 		/* Directly reset the internal PHY */
   17121 		reg = CSR_READ(sc, WMREG_CTRL);
   17122 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   17123 
   17124 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   17125 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   17126 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   17127 
   17128 		CSR_WRITE(sc, WMREG_WUC, 0);
   17129 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   17130 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17131 
   17132 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   17133 		    pmreg + PCI_PMCSR);
   17134 		pcireg |= PCI_PMCSR_STATE_D3;
   17135 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17136 		    pmreg + PCI_PMCSR, pcireg);
   17137 		delay(1000);
   17138 		pcireg &= ~PCI_PMCSR_STATE_D3;
   17139 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   17140 		    pmreg + PCI_PMCSR, pcireg);
   17141 
   17142 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   17143 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   17144 
   17145 		/* Restore WUC register */
   17146 		CSR_WRITE(sc, WMREG_WUC, wuc);
   17147 	}
   17148 
   17149 	/* Restore MDICNFG setting */
   17150 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   17151 	if (wa_done)
   17152 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   17153 	return rv;
   17154 }
   17155 
   17156 static void
   17157 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   17158 {
   17159 	uint32_t reg;
   17160 
   17161 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
   17162 		device_xname(sc->sc_dev), __func__));
   17163 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   17164 	    || (sc->sc_type == WM_T_PCH_CNP));
   17165 
   17166 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   17167 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   17168 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   17169 
   17170 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   17171 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   17172 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   17173 }
   17174 
   17175 /* Sysctl functions */
   17176 static int
   17177 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
   17178 {
   17179 	struct sysctlnode node = *rnode;
   17180 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17181 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17182 	struct wm_softc *sc = txq->txq_sc;
   17183 	uint32_t reg;
   17184 
   17185 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
   17186 	node.sysctl_data = &reg;
   17187 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17188 }
   17189 
   17190 static int
   17191 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
   17192 {
   17193 	struct sysctlnode node = *rnode;
   17194 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
   17195 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   17196 	struct wm_softc *sc = txq->txq_sc;
   17197 	uint32_t reg;
   17198 
   17199 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
   17200 	node.sysctl_data = &reg;
   17201 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   17202 }
   17203 
   17204 #ifdef WM_DEBUG
   17205 static int
   17206 wm_sysctl_debug(SYSCTLFN_ARGS)
   17207 {
   17208 	struct sysctlnode node = *rnode;
   17209 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
   17210 	uint32_t dflags;
   17211 	int error;
   17212 
   17213 	dflags = sc->sc_debug;
   17214 	node.sysctl_data = &dflags;
   17215 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   17216 
   17217 	if (error || newp == NULL)
   17218 		return error;
   17219 
   17220 	sc->sc_debug = dflags;
   17221 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
   17222 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
   17223 
   17224 	return 0;
   17225 }
   17226 #endif
   17227